[Binary artifact, not renderable as text: a POSIX tar archive of Zuul CI job output, owner core:core. Recoverable entries from the tar headers: the directories var/home/core/zuul-output/ and var/home/core/zuul-output/logs/, and the file var/home/core/zuul-output/logs/kubelet.log.gz (a gzip-compressed kubelet log). The remainder of the section is the compressed log payload, which cannot be reconstructed as text.]
E(6h-1OEKKaBHl/Yk1%ˣ:-!xlRV,SÐ%9\i&#w C)F|R>fsTEkD7"Uc0ZT%Em2䛯h(-!tC]vi]j,,ebQ!X_Yc.wxNGDz6>"ejrËH}D} j랫 :BwDһrz3xvmF cds~;ino>tޥc֕T\ {K6CJLU]Dȉ4{ dwj6ΪvppFl(rN)R1/NKw 4361aۙ+nO7|z%ٜY ^Bir!'}2"%BEtPIFnחAX `ꑱ߱S^E37ViD M5g~XOå.oLOvƙvWӛˋuP(!:|,Y'/3^rkQJ}lk|_?WST~D I+iyq.L*IɘA$ I;m% bɑ"e3-ِ*Ț-|ajyDFr,l;bH;,t#mݰnU;?{jnflO Gl Q(J`Qy&6Z{9"< 1!U۸5jX|RBdK f@) \gJEDKZyZ1.ʹc k_ URkB#l&'I6l(%X lL؊Ւ!A1J` z訳eIYD2#cC*TloَQ:8Dl"lrD7i 2n>ĂRXKeP)(VZSOdhAKW:YPLH!d1ՙbdKZ&VBT!.rLCe!:iɮqQ8&,JehdMwI*6u7E[!G [G\.iǮxhU=a[ ̛#kVoQl(nH!t܆C^_ H-~b2)\v#&dy~S^[&~r?O.s'Ѥ0On'魨ӋOL' _oWl)˼.qN]+|#*??~"&?z<~v$]`ng=P!-dw5]M&~LSڋN%3@>)%cD k xc=e! ! @'OK_Dd~g*O/L| ~s\\}+7#pOӓo30+#Z̪f1cq3*C_̪96br{C"X`s4pUEw,pUnpUtjWWFZcF$FzpedX`hઊ p,pU%5tR:)yDpUvhŕh*1X}WwkXZgWUJ5PW+F4_'wQlO&?*aJ'<-BT`"Гo]koǒ+blFroĹg~Z)&)J3CrD1$)dyԩ F:D\"*TT^跮o uc޳fۉ+9NCpZ[Im[>S)cD\_cSpUT X;\%)+\&'WI`AN \%i6 </S~Hbqŗ}\%ikvR*}gW@COB \%q>i%WIʊ 8+ t^&qUc$gzp%9+~2pĥT*I+~HRs+@׃~kPT sp7~yҽ0yR[ .ffF1kT aYΠFh}s&IdZEA7sj)L|, U_^TBY2Ғgii Cm.w:hmaLst1 0hf[&eK_U{>4xyu#Geֿ:F0n SIRdJrT8GJ*|frvZ Z_Ҧ'XEJN랺^z5ԟfӫmm]PClTp huZVFI%0i}Y/NˏH)ekSs͓%KmZJ񾛾mʋJ{F7Rp&h[̰9&So(gmI gZr9Afx)|MO>dS@+#M8SE3\Ɔ pA97s)L~.K;51SWN^r4L![ X-*?8LySohCýc`Y,f!Yn5LO_ Vmji<,)e,:u+dkDT~{ LI,籛$9މXTYӅJ^a C%WrM6ÚXY oPvTė1?,_o!]ESW'DڅsaI;Ns5eIǛtPͽg_繆_bNXG*ޔ{a[:NdjuZ-hV N.FG{8Qsysm.|D/{G)~o|7:nnŸkJ m|J9UT6> }$%>gx4i VF:\t`O(D2-`V L'q<N?[ RjzN,aZq^1|!`evF`:su%Tᦏ5KoB{_݀@JBT㻾Ken?tCE2lҌi\q_[gfU%yf2+7L'֮}&<{Ld6˵2CCH VKB9v ߨNZvRjv\'e.S5Rl).=J2up W@J)N 8իV_^a7㗿VMz$\ny?_XvU:]߃z+T<,;,u&h茏H\2f͌"(pvMhU! ^RÄTЧHEYs6wNLjWn>OU)y^x ~,gn(k^*Yo>״ޘyn#f2n7n%`Ӆ_E'`kLvY  ڀj{sQ b|j&F$|$%?>wA}Q S? I~-ͷW\ ;(pսAz[B7 O"|$iKZ09?!7N}17??Oަ"¥sEgն E6.,|q~VZ(GPb˙X0NLހPd2wmL%ђ6_ n&/1&q0ƻvN3GSԺʷCгGޅI<+0)9\ogL)6 كRVO@`Iɬ$q>Փ$zTz WO(E|H{vG@"g(fP f _vvmh!y 4۪+UB7f DXB-F`%7tTRJJ wNU&S%a"{&CDw) qJ7DqŜ(aʀYDBa$0=fe<+.܏ϖ+!e0(oG7p#K}[gYJM?5]-az4}7}xҖ0##I<3\?Z "HC ('N 1Llol>W&?^"M Ml\b-,Y%ئ /M&߶GLLL̼U_Pbeo+Ie44LoN~<,T֚m<UZ4X⣲9 ?&8H D2j9 (G"S0rX 9{^X8f4a=al]k %jJ"䅏26}򲅗6ͯŀya.xaB,3+DEXx"K*4EP0b&EcBXYL^jʈ+1h#2CjIǬqrtLq-GkׇscdZnM C CJUW^Eܾ`qǗ[|jbkQ`(H($sŕaɥ0T@[VxyF BRpGﴋLRcP4a4 $!ڠ0)`e*Ȫtz{LKp7KSz;&6jo>U;jݬlˬ,Pq!:85$2).e! 6HX*+w~ F+sF'Gy05h#HcgUH/131)5D!yAD׍%pv<##鯀XBEC$'BaLGy)'k Jc L7W}jf&C"Cڊ5uf`-l/5ɀBx>zʑw&OY:"jp+-AH-#LGgo ޑnwXN18@JWijYN;Oµ/].RY&wN6/S2^XAV7tқkb:9\O 0uoKS[ ʂ3f~P1jyݚl0bki VH u旻Nb|{?/ œ$ Q*X 3/b+}0* Cܟyb#SXkÒ R:%|p#Bk9uDOuXRKLP4XEƴ1<,xÑO"zׂX\JsqS +B憵<*Y_3|4"Xg\1Ef$G GjA&%ĴR c:Bhp^Ѡ(%GVCPK#a'k,| ‚|ӛ%h (@\空H.^C=JM9~Z.=` nj3oӔxŅ̓MIQ;bjUp//}Wx!PX٭Lq{h2U{{$)%)RaE ߷z/CLjyꞪL(i9N*cTHXG"\Jlf$S&1jKfD)^jF5,+a2&1I %Iĸa/I˨2#FQgHXHy! p(\kB-ӡ5*@RYЉ XEdQ.!uE2d4XWN `9 )R5R:@*KrUG7 .5[4kd./FV[r+-0=R1l.&ټbyi`G{TnoߡDջHyQ9 IŠ3p&+!pRyϾ>wGzicg}zS3"8 ^SVKF >%~OQ8fl$iN AwJzD`B<5 S띱Vc&ye4zl5ͭKV6AL$y-wn}!zr%W!*U"PENkG4:9Fhm.0$5AZX,@(<* |0arroD569E4\ ש8N7wǒ1>6ij8i΀`ne2VO}&)Z* k^rs9+nT+o1f,p ꀕg9/"x"< ϭOȡ^B})n`E1\$Yӫ RkcL%մfl WZ)хqƖќ.ԭ.{aII8y/z(oN#wC7O~@nmXcKpwJU9dsd$q&w4hd.%4006N0+DTB.Dj:)rklpy,Z18mVk),x $sd4Faw7OR2i!(R *f3IE.qVq!( A t\0G iX6Fz}X 1)cшǶ7q[A8VaR`0 7x>j¢.nxİm6 (QHLd$&=cuXҠ$MFbuM k*u9clKՋaHZUϰ!8bSart͝DGҽH%(QHiayh>Ƹc[}(և[l܃ [S#{_kw1aAFD\W\Yt"FUx_}Fwȭ܋$CGBbrNE),! 
$>/-Ğ.㧎.EZʄ2a^!(0A*6bDq^ES2"蜦ֆX7+X屙kwoR{Mm+Al.!9 E)۫oS+Nya~tv ɣ>l΂ȹGƉ(p4Fޕ ` :(0JMXfOdLNH`٬@RASEa+><u>7UF9ݚ7?FOwjTXo==4F{N-R "͒ 6*GY,% +5}9%xez&|}ٕT.r\}.F{N?qcMT!)@`rEnIΔnO(["cEP L)` SRz> 0P;<*w~Ӻm&Zf4]›7ݜȕ c(l7&BN~TS l啪.Ɵ˧W7itj$ճY4OdsyAӇ7^eBޠ'WC۫ԙy(X}N<4\anzE kïݴ$R#?_~b8ȭRR|%Y<ϼL]+RN hrôɩ*|9R'-EGc;dל᮫> {yf}|si3ZgAvBpjS4DI:j"","-$\.%p A2 ǨU1kQR$M!zq#g- .۟MTHt\K+?es}{3&Y^bgkьa4qEgL6eߓL.H&p +Q"&^82`90I^ sj}4TxF 3À$ƝVc  I$뫝qǂq+NR 뜰nNZTV24 27O͖nvbBO53(gpfǝ/Z$ͥĴ#Ho"u!AM}=0kPqᛳ3[9yR)"ȟ/{}<ZDECvLaN06SDNobNpmt8%x}:W85!`I+rLH]{ˋEWdv~@b^-v^| wٍ)I5K0 Mtvbt}bRN!-ir.=u=]?L(|&CưV\l|)Up1UOgiYl\yUn<.5#(tB̵ӋU9TOvfXf<*^^drwZƥ=1eݐ0YfyC_(4i%wD}v;UG^A.uX@>u2B-#cC_1K;/)I==V6*ъFįec^y;s+`ǯ~ŏ/xcw?D]W:]Jܙw"@x?_yDײkX6Z&G]M&&߇S.-mMB(]TC9| b~]X 3W5QP{T5k_b;C`پpQ1\miޚx\6!r͖̩cQ:EI'1zz1 Xv0ud Oz`VNj^|;/I!Ӿ% 8 )\ 0abXIH@ =HB8#\{ j\FhZm^yDQb6rͨat{!07ǒ12SC*nrBdսA'O6`ou+0Oȴuz&tgHV$CvspAÞOh->zzy~RSGO•Wf~a*:pMv#Tȕ vVR[;D%XS0((`oKfZ|O͒3q;p99+N|xBTҒ*-yϖW9ɓipof`9vqeoKҝUwirU,h< K{d/߾Jgqy`EP{<{FMO3؄ԛAS'mV Os4ck7jTBj 2߽&MR+R3E9AZ1Fu.8+Ei6k=>#sȗ5Jtkp6HF7g)Oc.*5XKřgrDEhzr۸%ņO&nB/sɞCNlmB »qɲO_~_{{hkEl-['tu} ,,`ˊpma!LK7Kkw+[ljs~9MX[XK1sL )&5U^3^d!-Y" C 1S 6 kdX~ Ը( I%0p4[.yof΁<$e`M8ˮ8)SkS 6o@ ul̀Zk%[8с#{`6 (/U5r-4q_+Ɛy`*1tny$<#VZI՞CzQ 9D ZI~ݫNU>d%Ux˺\xx(ȿáDJ@.KtݏP!:e H%1JB̔̄#%J Z#r[k]CW[.gtatu{fT»W}~1o++oilg!76;6{sݶRvvDbпIڟJ7T),Jos2RU-cy4 dC%n^Uuw5<>>2 mFxsRqjvn J7\MeEw%eʫ2Oj?|.?OJeeMQz+eʡPH ;udɊG(EzdLo}2g0C%a="A8/g&Ԃ[ﲵ;P& 2$&0(ZRbBBg[JLhe}vF g G(yi\ZȚ+/=4&9w>BL_ 0p!⃙sk=eZ).wT\2G̒1PS0>-f̟^ώ)6ݳwͬ3w P{Etoō4 c2x_fQ̋wWsS$}i=uǿхkI7J4/ʌw5F+ QJQow^W]h8-$M_CvHn֧hQ鿭œ?|tx n 1 GMɯmҒ7'nr5iiޖfx36:YBir(2e'?^.zvl<99[]vվKUrk4ht> O3_yT<숍'7آR*4'wQxx< ?޾|}o۟]O4JV^̣EQXn5o~GӦV޲iڜvhfߥ]Y=^>hUA;W{kx7?LdzpN\2۷?z$x\@WQד8ˏش6dzQhTVvp[b C< ,q=IG̬mu4q [nbG i+-pyo@{ v{9K ?[."HH _zS7;H66ů칖"8lpy4 4Ѧd( r,`=wdv=D Ks恇P11c$4sчF@6̦/w_J,&B#ou_H9"n./L0ocUw{FM;0py[I! - q?w *g 7ҡnp@t0fzUi'է^|nz (=$Wi ׽>N.F}{3!- Һʻ2N%)3a,k)A5>YSҪdz Z[{ !BYk &9P$&a Q!x !r΄ pfggGB:kYنeឣ%o[=!i>%=b}5_ѥlEFkön:z&tdd::je8ޞ^.Pk5HPM\$iyl/fSCC΅Z8hOyK΃"9ƘAR9sB*\$er"r;Sj^5Qg87.)%讙O/Nk80EVo2ObF&X˝JHHW `uVpgϞLb8$&Iu38􅝊lz.H%"PŰq%),EZ-EHi" f}=6_? _J mJC]pC?%Tq{#$@3, 2ltjhc*[BǤ6*tu1#!FiRbh.-GD6Z92晒)̅ڄ'8=?0^|I<ܔ>NIx?󤵉4o>{jU=Y$Z\*BGnC?d_td49 sJ-Hڙ̰x,|%W޴לE ੮d5vp<;Fj볤l_UZ);j3J @ ^F*Ѝ .6L?3(g]ҏ8Zk/2mNcA@j d2ιsf:1*c *eԽ%"j- 2ee uBs'yrV$f#@踚8;愙z2ql7CaPy>N~3|/mv$<^,4~FyY=ˈb6gmDN&2A1Ahf{e.wxP bx,p9iR2)D1NelpUqM|^4}渟T3Bd7ZA%3KT#5b%HP%Wp ˔BErΣs^˅T6 J|:?U=yu %WH0dQSצrnȤR2¡$%@5h*uN˼p`RϬ9^s#K>0LIBLk+4yVȳ tFUlޔ_\1ۛ{㾊^U#| $o i<{OEd|#LG=3#sg:l:YOv*6mszmv,R?,Yqz+}z_ňnoWc 4bs -mܮ{ϝ Q\uiƓ,H7o<ױ&ùFÓ` '0dc'WEkE6փLI2uѲCo=T\U\úJ#]tTLPEf!CeSt#h-(@Er0F=9~5&!ZsǃDdŠǐP+bI$nVZ ܉^ƶHƎS(^{ Y{ű_[m/ gP|$w6ҿs|58v :pFNjnceLpFwW.yCI']?\{1hQqĂ B A8)Dց,#Ҫdr8YdiYGZJFܶTYII cBkZi&r;s:uKr/[*݇fH_{T C%SЍRZ_Ф?Zj/Kd@>=.+nnغ=nDK:Vm\d)UI_Iaxt_dT#))M趖N^t^tNAjzqzҩ@G#Cԑ{&T@pM& m@C !i,l<Õ:N0soߞB:gzv}0 x.s!X'SkSL*m\e~u WuOtU_bn33]@BA&Zj4ژYlkT޿vGcs 3dU>p.R:$͎ey0,u`6eNɥ0sÌ'H)z ༷ eNFr:\ g+45k+!O`wo KeLSI:09) W,e2LH Tuȁ>:$YA.0A?c l219Óh.P#Ay^kfBǂ97 jjdE5MN%43>44t)OƃeP2V 6,s׵Mޘիi ۟Y{)*'ÌQpPI ʒ:q 6U(yQL"(,C9=3h H'I.ϣ#(gt J @2T9kD_vtqƆX(XXh{,g'E3^lnNt8Y>_OZHQ"˥`6$C"z)1&4#qTFR 5ds3=P:(镌X ̥MҪRfAvE͈ǣ`bn jw[ڛQhdFTL'RD,0aF[(%i)oȊ@YC2*7,—dE4aML,(ȸT8FcFRȨNڹ3rnƨ(-l1"B="x #Tp2b b,<|\f G";EDk1% >- ,i^ kuȹ$z՞pq2OZMθdS\Ďq*8d}U[ F/ nyi%!]<>5Z@Ez re:yoY_]q;h7o_ىw޺wɻ sMEYǿ3/1@B|QШFRB(@ !QnUX\x4.odQ}^wW6xyg0F7WU)p^ |oWs ӕVsͱyօOϹz\Tqy6ۈ5L^#//D. 
eVF-\898l{lSﳶQJ+ke6X 1˜v_Yvgm},Z:y\PYA"g $dcOm22ms_mqyURxn[43|!dz>VmO}>\" 壏YFnAC.U<3%RYYn8EAvw7*Y 5yIHg%8}M&'ZsA GB:eUg,Y9>yI'+_Ϻڍm@rYeixe~S_F1rJ+EbrhkP՞eFil[席c`"'VUXE"Lް;EZZQ:'F j_zEyz96<[fohz twɷΉ\Id>i>rEZtirmP5=݌<]Ons4ѳgV[2Á_=MdMOu77>\'WC17کs>ux~ s lso=4Dg'ɾd4yHcv{cKNוm>"d_nORA+ NiR7NZW 0׸RO;cٱ!e:HSF'O2ܓΥ{_F.y9$)j!%)H9F^6qPx.|`  n] L pȸYRI ;մ-hY:vgb8-20|oz,XdjZZi'Ly^ *^rxkТ;u}f!d&f IKURyF) cv,!Mr wSrRI^ uYk"R4 $$k4&HhOJ`-QcPĸ&b`f/VW57)Jo hI N{3Ȓ)#_o}޳9sC-MkGl^0A*.\6FyPGD6J$kAڨIWB~ {>jϔ_G^pk)M"}AOغZȚ+)_xq"$^60L\H/fR~\u4L~LJή#O0dLפ]0d @0{v?j%&t臧)nzvoQt5K%fGIדL&ՐOiBakpQ@}8.Xj4Wiu o${5=s+.8F?LolriZ2'2$u×wfeEfyOh``ŲGDO^\벱jcLFFH}:>n"N&_*W ո鸄cmhTO6608: v{w}fͷo :_ LSaw) $Nqtݛnѵko5-NktmYM:.߫c5;F>[[Cߌ$fOt:_5UVn8Mb~x02̚h%HZXalÒ"/nަ(EctQ]ACA:ڑl]ec/g㰘"%MPRgpmi_s%5Ha xDT22Nj324N+mn6g'N*o\>>\ƥ$gOc8HI~*JdCwݶ{HP䒩$&GaUrhhj?F%a/6V=SXG}WXscDM,,A+JVKtp2sY]7S1.$`,k&lT^sX&FfE qfC&nEӷF 8iCkI!Ҵ!ϛࣇ153ʙG06 χ5\!Z̓MI4J؀ժ GN >eb~B|ȄPND0|b&+R=!ӭrt;_J$8f\R0-63a*=z-$%^R0C΢% J)t8+c@Lucs4!FeSfNr% R3#`V))2*Ҫ9Vgjm~EjqLvKsyK|-rOZ6o?ˍ PA3+"I@ ]d_b@eM(MNɃL_Uoޞiך,iXd u .I:^y$FX2OțvL>oB.OśP+{* _7Jބ9/$-8~Z20L4(LƆn$9՝Ͽ*LV&.'bYKA(Å^!LĒ+Fmxꚏ %($*gDŽYF R i0Hd.2C0_-=6ҏM${d?_^`|<˭7?{R2XA=m, c%s6=Yzz @0fŠĢ%AḰ+tJqtyp1 ˼?fn?7#sB$8$[R+ΥpRLkkhʋAdV&{QqdYb]+p(`hQwE%@*XDVK5xY]UЃfrަ3g&3n G-7tSp>Iǣ =FbnItRߝ+i@}oA)M})Ҋ~Il0ʰmo̿_ .&ip2m]{&eW8l7ontW/X۶rU7Km۸S͹؛iD~:50_8'M};I]d4'oLB\tXCQl3&\.'}$ovLR4d4@RцʨLnv5n7M7i(*!~Z `[ŝN]QC#kw/z0gLЂhd q#W˗'|ڳX|^*vrNu\e>>^uWYy5]G+7(NQB1²ҕ1d WiM}<1qp Oȓ@jg')k0*,X!sdYqBN1Ԝ'Ju#!,0+&FY)94ԝ9e S2FfY53ca`R(9 ؚ$sOˣ˪;{CVD! !VRHYQ@YJL X:u~eƇ"3 4jJzHXR'` h,k9[ AiR%A\`H>8\8XrI2Gs 4wt?MV 6 3n4s R1N4P[wD'6yᝰe| g3b"fEz:,*w^Wh&Fߍi@#saEPZ3~ach1$m M xZn@_c)C=Z{z?&V\ Ձ |sytr>-I4٪HV `l2xa~HO>TNY:e1`xBbA9B A")A@YSVjI$iJZ{ dMV(E98B[_Xc̵-N]B &mxg}iOsoM>} W\ʍ|Xlyql/628lTLxi}SBEԍvhZǢ"o˸fkU+xf,p ꀕg9/"x"< ϭāh-R&fTOp*o2)DPRmkY:SkX5]gl )jYN<46!lp><4vywS*YcKpwJU9dsd$q#wIu)#ZK(Ij yIOy((HLBDN#"qm?gbn;Dkoڟ %ƫ`Dذ; 4U`R *v/G.qVqBN.|VA9A#(X9@)04e}9¨OJb<lqwӈ8[ivRJXALBAq(K21nZ0BB2g($^2:ؘXҠK0IN9kĻKmA/%lK6ՋeH:Uϰ!8b4art͝DGҳH%(QHia`-]CчqǦP7̋H)XG> |,(5~Tx.~U3mGpYv<^ nGa+r {ߟn;jbawBᤈێ:a(SqbN.R,3}"贌/< Op>H0Muh)T"<{HBmˍO9i.aϦ(#M.tC]w^ۯ VO~.m=@PӬh0.) !+<܎YA?m8n> oGȣGY5͗,+ ֮7h(m`3ln9Q 5(^+.ô7 D[v3f}:]or eTiRt#}R_ Vo!/r^6 _?Э^J9<uˍ.$W:xzs+mZʄ2a^!(X0A*1j 3ꄷ5 gMXڎU%d>sY1zAH & 5 TKAjZ%*8|槙s͟S.gb.N9_+߰70!~)l4M=f1a䔑S"k)ۛB"na-^잍ǩg06 mסtXl8Ec|\ :S.ʡ^G)L'iJ1EުT.hu6oUTE_lVI^Q+ʍ(Z@$Z JR#h6UMwk(B:܀7 r- CjΞ]oV8L0'~܎MuǦ;VM6e^%<@rFϵ,wJÃ&mW &ļ0a?3P%HIU1▁/؆{ w `ݦc`)T09sH[ ӥ*beP˫Ld_r3҆\YR3{1-DR `I1sMq )&]6U}W9rd0c X`ƈR EX!%!)FN֒vs3*C H Z{1[ >FBSbʇVL]-믗aw)F6da|vwڥ=YmBw&4-}6 ûACD_H>=F=PyOCÄd0qr4 I1M1et%<iGxk{&G I }Ooh 9Ck@dnLk%!t8nGaW+؎yoDK@'-BJ$2Xa$tcKbƖڐ2E ^)$&iJqTQ& Ȣ`;X+j%%RHDcUp[#qG h|AC'o LNqμe֏j{Hl]"ࣃ1٭Jqd%l1U ّUdǃV'J gHμ5` EI;{O=خ<irc98FXHX, 9dAyLXv3,}3FҢgbs ;ͨGE.^sHZ<9v>7˲M|pN`zr4F{N)e dG¡]ic* ֬Xh ʌ ;ۨQ1,%! j@CmWi%A7p8 ًz\ialWt3Y=_mt]w^9+IU$]^Ȁ.& Ρuu^^=L`H6/]=E2u]5z5~XY繖!Ay8/g#Ws{!soXda֬_n[Mڬ>].Py [J|&W"d_һ. K]EMNT1͖.XێEg"e*Ą/Ez\%lKȷ]55rt1L $Qs9lEDo{t>_=^ yx?=ZwxqҰ, =[|]b#rJF d g!U(aA{*f@|oS*%43=Y^G*K~D"%"03 Hbi%1 ! L<~ {< {Udɂwº 밿G. YRq)VxLP b ? TykkZGXJ15@8KFqDY .=G4*\T;")^p!rv=hC6S<.$UR?8/]-D`%Wb򈤂*û"$0 I/c]FI7ƅNfstR?)|OEF҂LLE}dL&8cH)D0t|!c3kt<>}W@ՀP& -,t'Ux\$LHϝWAYqh|U ֤CN§%WoNOO&7 rJiIS,[ Y`Ba!(|R a}NUl܌k|xl'FPTK(6{]\sKlnEjn'EҒK3Nn? 
^1Fq$i< 3;DBG YL;pTOx6f7YrZ=jM6T`OGPH|?Y}oǠb<'gcIJS<[YoP>\_|w7wߜ_ߜ9&۳wo] Ip)+P# rk~يE }m CՄ3.kƽ/>Z鸅Asӽ Jg?~f>t6Iӟt45gE23𙯿d_O˞5k_d h Ih=1VHhi1[S0 :EI'5}c~t0ed~V'zҀTAp'5Y?2ÛoyuRWD08P*l9H8 AD0I1l  vF댺}{Gj!zHlC(0Tev!K9J%V!ZlS+׌AwPBfKmܷi'#hm|ý D7VnsuV xcKH0`o=.tvHyƌi:1r [rmۨ)33D9rX&Tt>;s}?3;~/jڿsyW,@ !"F,| KXXt]r*ub){7~6ܓo&uՇR7W"me]4N=0}*%޵q$ۿBcU?f{clֈ_60~J(Q!)9~CzDY85=>}{MNp &gEDF8Ik.E;5lR V \0J;V)e Bí7;\)# L,TQ!X%]٨HSpӼL{2 H\]8kCB$呞NɇQwK3n\::]r?GMq<Ϲq#Ē (\"{ ^r.3w:6\H4ASxFCR竣y O. ޿򎯷#\d%Md- nLsޣRyBB;G%{QqdށdL-uG PmB})m'\_J| ܝt j2~020esĨyb5`|E860fx"Sn U4=9o:}k:H .µ`9Uű- /ɣ7]%,

t0^O?_Sθ|WfS?ryHӳ)ٹ7?O9+1K;<]M姓avArտߴqB?z}% |\xXt_J _v_rt0+ s~}I:?aa.hhk8tI4~s#![Ja0bJr,M3C*]A&^BvXFQaDv&q13%#`3ɪ<Q:0Ȕ!E%re[MuZ\ "봄p?W.4c*ڂwbAa_iիIe3KHbz@|ȽK@TRRQ# 7'He%=:Vyw yhI$ڽSu'Ywʌpu,=z JU2sJչ+nхWi.Wh7̂:i|Y%@A994t_p4>D.#.>'t7q}ۻIkpΩf@6ZWivi.y.9g/?dL={:R:P"*jiv^&`ϝ-1A>.V^Z8Zl zk]pÓG Լup6w' |j0sV:*f{*ܹދi@W{<:=(5l>o[/!{7mN^#Gx(Qƀ2LN4^p(\ӧd1],?Glr;mG(V"ɸ U~ jo.lBp R`%Q&GDnQlJp.N>- O?Z[r%JY`@l4*=vxf38fHNtft|`ٹly.cdyNݒry*F39; jKsC@抳\ )} "P،fͱq esg~1מ V\o>v glU{Ӳ>i=rx*lFNnhxy~ dy+wq堯A_ߛ98&(3*NL,O h)O&jZx$ԤUd:qҲXJF:uɒ,5Ƅ̽?Jȵ_Vv:u:Koiwsw^@Wg;_[s"S>\XS?JdH{_ (Ic5^:Ƣ5hbn$DEG&R4̆l)ˢ]w{;CV9}@pɪ(} |J!d$. k-`Y,(UNɥ0svdG"2)b:3#s;+E21&Κl+!+pO8}"$392&ecYI!W2i"c3F:Qm9X噢@\"a6>c019!I@a Sdo8OԖ.nmRʵwlT7 ڌ`KgejKtrڦ0銓孅ڞj/qXLܪ{0VFJ*D UϑKf#@#4 tUQcރAGEqJfMvёet:EJeXM5\ªqjXXM36BX %'/Q5!*Mt8Y>7>?i=#(r~0^kC"1 ăʈ|Ţ xݷP 81{.xMtP+5TJ(KU9 Ao#js;bQ\01wiǎ{~* 3p׈ tr*Eg 0ѷȭQZK2HS2ސ+AA vd,䲅/Ɋ"iš (ȸTD 1#Q)2v2Vv\2shpQ(Ķf.t8:j6Qrp1mWFj_z{XzR}wo~_\4lrQU=ZժNW]+WXfK7dGg(Lkq{!c:'hR!`l".ɸ *z5f$y}g5, k[2mo< T?E\nKU"-]YԬb UfR>*>"$-pUՓ+Gcp.jpZRc+𳲗Ne79)]#*Y}<\5\=|،J#g=#+"@p]]-I/?wk%ys=)K.%QZ^CW^ S+F]Jzr'$WF&#W+әZqj<)W!)4w5Z\ _ rJCh&$Wltj +u&\ /Rӯ]j~ϯYZRyv웵TWܴjy[9wlw;o]_lZћwdNvM-xٮ}OҜWTdz {@y(E28ȨWz~WgD,W[j;vm!W Wz5LHP/}j 2Z2r5P[|_Dl:)dj~*re3\ krrEd'$Wh:4h Pzr%ՔՀ&#W@kI]J>/Q<:jәpU"W@j$> `|)+~6po2sWjޟJg_\))+ \&㮀6Ľ/2]r#WD\ '#W\E2Eʕ8]j @]}{~X3$8NwGeL{-Yo9}o;;Gxv@cӯ4v݋|Z};}o? 1z׳z`xv' y~2^\AZ}J~g 1DnE8FtւE~ã[vD1= }[lWUWf<q}yҰtA!%kK@X jr`u(b;M]?;Q8;vkX(vi@{u!nAQD{v'Et<|]6ɴEށE]Z\paVX%Ѽ9Yrw+u6O: ѷgW׈U70B왹?n?l+J`ksvcUEIB^>;yT6j)4Ձ_?M]b7s\E~Ęz-9|<@ALӪ~ERvgf+q콘V-U0ء'k˄ Հ'Bhjtz(W}#vBr ޕ\ d"W.WA^\9`2)\S@vA^\sOi eBsWnwWA^\ ݄ ઙwWx(WԙA~gm7\Fw%~#W)djU\3{_ )W #?p{Іrl W/Piy2r_tKt( W/Ss ;#=plHLưڍwnn4r~whPX7dy΋qf(FaTbYͩq]=F.EO.## p.ݷ l6I.۠DY.[$9$GvJr#Wx2r_;+oA\}#In}wQ,nj=w.˹>]fW/^8i{ RMzEg`q{2Wn.:A߷b+|>4\[}Hy;Zl.ޠqt~u}~ 6 E/5~hjgٰxxcTNo8}Tf?0*с?70j > }opA\{vӚYToy`|&c!eF=#HQE\@H>c~-p?AxkG登י)[ď3[4t]#ϿKǜ҇% . ]ym?~}^.?x_$+8ǖ,kגWWP|vn{M %H5#:KH㹚Y#7]?lMoZ,,x~9?9[퉊115(LY(qL?Te,d׆QךC9e&mԘ )LycJH9W[aJ܃>cGIC#;qԠR+M!XmXCY$2 Z-G%c.цRk`UY)V1J.ׂȢ5NWJ=RC~#ZtoK3XM\]l] \m$.[(85czIPˁp> ح6cv)>o\.蘸p1Mc/9%C ~0@flV74j]̨R(W hlH.5c&[c; ci=WͥaUR]C/^[z!zB^7&#Em]_~dAS![ġx  9ל9~Г Լ͍hDU`ZmM%D:gRIIT1ڦ{ryz/RK$}DN'mQJ҄GZGH }m-"% H/&mBQZ Qv \jH) !TX8fڬ@`j47E<'ӼX\)NԐ 7RN2݅ DnJC,2>Ս` :`=KE ȎlOPPKԑ#1YeƧp4ZE 9YK2r3B^-\Tlpt-!x8Ux;v޵͎Cr4. 
'ʉsϾ"8tyvT:8j#qї ,qaYwqkЄ15k.7kZUAp Ffr#s6cOP (ZTB5 )VzlNh%؎~nJRq t`*1+5!6\mT( ʄW7-.BG sUf(QwN⯒2*,_WX2p6L6%0v]N+:!TV u+0ĸLAAX7[QH C%X#a!YLhB;=k eymFݚ ǚ-ŒL71ڭE }&XVݺ2:vdo޺ ,S' ;iϋUĝ6@@ۆj¨ZaĀ;L Fl6>z!zRh`[2(߁*QVW8hw""x"o`ઁԫBѲ׏F kri9O&[9|@ӲP_VO!.$( 9m'-+tgz?av-'7J8J|457i\4/\}k+j!)Vjk\ ~?`e[ϭc4 R89c՜1OU?&x{Åޗ/m8<'p>>:LE-&G/0N^QR;M>q?_Rǽ/5 _gu&OnЇglu85U =ԞJL{=?+{\e;{\qWEZ.v*R hY\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pWO \' - CW - p*'\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pWO \Ad>WBYn*fo+V@JmzE pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\!pW\=)pd+8% do+ \g< +B +B +B +B +B +B +B +B +B +B +B +I0hv'b#5;eX*zS7CZef !XxC/zkc(̓ćR?|D fJhR^v3#{d@`Cޘ"W"ysUTh^ZO[BE\7}qEJ4W%YDn\?{upɍGXdƮ\)NLd2IHb VFb A[vt;g)|0^I}DFk"K`zI$%9&hx\RSB7Kӣt~.o>)ؕU雃^2Fp9e6^lљ[ .L˹dVtS7QEfjea6U5}?jT}5 ݠW-$Z/u=T[3Щw0u`>]aV?¸_JU")c4ϑpGbYA$k.iᦌ  !2q+Z ,NQ^fܶ2+ rnr֬\!sU*g+JCp?}dF]h4LʺOk?+u7H^B >lqe*+AMՖ5/0_#IATdL.lsYO6 |VN̂be֝.hJg=Vָ 4R )}̅Á^XceqĢ#o5#eg7ݒrq(FUC.1xю^kXRxL1A;Ž!A ")PVVX8vhS}Jcc٨yYf!UܔCtO>wbwf8sJ|QCmy BWN׻Afm+'-܂7 ty:pRlZW^|1b0gN*f #γdBp;'Iw4=JLܞw.LqmzTOv[?.mfE ,Ӆ#Ԥ1wd\.@#X,ʝvT@gN]B;3͔>GU y1+ە+U }vKeNVq(nBo)Ӥ^q݊~H.KZA~Ju]w#n ny&ׇSA_ }V+s.n*-l ,[>Yvt+V<W\UșSΣ#4j/׻ڻU=ѺVt#huUBvd!U_vܰJ.0AdR9jpXN&16t#vCv#v&=SncBA\i#`Cdnwuu;S#LWYv[6XNP4!^IP̥e!dn\>SVИmtw}3q k߽5ms+>e+#+>tMBU]{=t_tҞI1K/nbW! Uz>6`r* U[_W.VGi,'T\ UfU6fb֧ XG5/&1B!hmv$k-DFednGuv&=֍&Ov] y.`l9PQI8j ,2I4)KN1ERf$@q9+oNjw8P;N 5\15xm9Vzzem08Hʍk+^ɵ!ߌ/i${5U)Ǘ f L׷ytr𭵷]N%7窽&y}_uZ{cE䌪B** ߁ j# Qe.ʨT@[S>ZĹԙҙ-L3tl -' ʳM3^m/6ٻ :Lo?P`yЯ'rE) Iri?5 ơPQ eL,hr,9ШdJ:,Q=3ΊJr٨3v`P26 rU8hu%]WmhWB]ڝiǎXjF%.":9$R1lr5JkQNb*% DI`[ ?K]FRrIrfOlML$( T88,isĹ6A|]Ƕvl)ZD4z D2 R X&!H&ϖihj\$N-bgwh;n,`j6v)ٙlkev]DIuf|Д':*.Aӭ +2`xN@(ŧb;ӎmF߫=j#2B2-h QՏT(\_cѥѰ:8tPH9Y P ||TY0Nze_kv~ǭ*Zl,76Nl8ԻX}X??g|j0_iSoY? @Vk$׽7paɭ."|.8Z=QKhR\C r-I$">E.gbz|%suq zb{D2;bL!Gvz p<6A+K<& *nF|CĎ wͻ_=R^jҫxOH!SL۠ DD&MIr$@jo') NZ; zK?rٚrn $PРL!i ^(yR,jG\gpy޽!Ҡ^߇fb`Z%9+Ŵ I`.s'Ȟehd'{xl4L {;I^R3F \Zp^ŲeΒER4gxϢw|O>_pMռnx)hr~ /ţoS+ 0>YRQC3>k?^Ҍ&]ͅoV(Nڳaݗ0gmrەD9sE dMK4mI[:_ [ߌkYUw0c=Xzrycds\gEuY`H1R>ߏơzZʽ?7jW ٱlY^_7]}ޝPfNɻ^ìI 01A?Z=J!4jڛ7 =˧u}]>vo& ?~~y=LyhS&Dts=8 =_T_ ܮ/Ƅ!s兿+q)yW;#]&\̞4P/=PJ\gX#4PD Nv& )LY ); 8m^ w1Dlx!hb9KdMMʞV𐥠1Y\daNŮ)Ė@j1A@ʶ6/cQvu?KEOߦxzQbY9n#> T#c[VǣO^Nfݤ,}싍> W׫DfV7AA~B9˲ gn􋆾wݫnSwD_~̮@ jo5#ʙv_fJZ4Yj"iG`O$nI@= l6 H<+Y 7p7`փ5q:վ}W?oݢۡdS %ú( DYz lSXg1g"w8ճp .}[Or{hVEee}oɓny>tݢ:~+/OPllYYyuuY`@fexft3K=d?*+)AٻIgKf0n/'jg߷zڙb8{!_r/FP*3xn29 )&rr8Ǽh|nYGh'A1b$T"A4"Px`]qQ2]ZRoVie~4Tym]a5'}0ʡ`ast*>uM[Mݬpsl(s'_͝IGM {0ҥ*yU1 %.jx , sH[GSzנN)bxЄjO#ю#pgyN"\d'XiHN+-J[@"DTsG[("g8;WHtV/%& ?}+J9K@+B )2y hJJI)ΩΝ D_Kp3"اLA/@2N@%0l\ݡ YmN,BᱹJ T->{oTU*K;ӂΈrFџٛ-Rc^}S7+[=etHo^yZ O!sEziЋ>[P7&dN wkǜyG-\mNڠ'Ӣjz򛯒Z͢YXz,llՋכ*Ŵ'qKĨ>܅\u{QEz#lk^&>%BbRcI5^?zӽ6{%^W~dٯ?~KS4]M<{Sx?Q'Vs3}]<<{ϓ:_zu&ݗW?ϯMhSor)O YopBГO)ɣ [9.yhx>_op?/jV(gL4\s3=7{su?ߎ NP?}Η{ P~ΠIM>*J 3~5`tL_ߺfp,=߿0sK6 BW+[Tyjc&y]A?siXt :c^.ٿރTr!M~꿧u~?+O+o%W6{1^o{q/e<\"&@dSwR>(+n@ߋL ݹ? Cv9*B{iWSluWuBU,Λ.-Z9|DU5SmSYmEPUUèRǎĤVû%v꫿ ,1/a ?Be XqtY?wXzJT1*/8Z+{p'AjLl7,I5d;˚RUlӔiZE .c'@8\Ps ZnQ0="BQhLy2XZҫ t"/l5[_mfƜm z/nosdrs1g˳QRfڄ@I$_\G/Eۙ+!Q0hs&d̉#%D\cG5t\c=$(zGDw!L e"$/Ȳui+h N&t4D{S(sVA-m=3{ L ^X[u ^~\XTiϼcr'Ay2Bw* LrViȫ!tX`HcX\USX&T{˄6 &Tѽݖ1t6b PRmtJ#T kBCW .m ]%L;]%tuttFAS$Ҽya^g?([r眞#woz *?CogxqxN9gLP@bۭ Ӆy3atφmĮ56-ś? 
2n``>en^ާUSy6umlxuYU_S2[T ㉮Oo޽a~UwT埽&nlgngXM)IQN)93Z' #mFx^t@v`yjtM&$׹d 67 `>[Մ1oUa6/[d %.Xv10 f1grA :F (58C]5iaC6 ִZFdZ%GFl`ZiCl%n]`śCW l ]%:(q#NTfT5.g)tʣʙ}[: PoWU;]JZ:EbD"$Ƙ5\ʛBW NW lmWHWHBDW l*U)th1RNW %imWHWRBx*V͡+*%GoJ(Y "]I*l K!CWWD;]%3g-]])(o2nΗ6Ƒ!GȐPj ҕf Udc 2DBW MUBI[SI׳@%\3}&HJF5-^ Mfpa#H hq4-7ic3T c"CW 86CɱUB)DKW'HW"p ]tڛ3襫RN(7`*)tE@;]%XtutŸbT60'ͱ]%T4ZΏJZ:AsDW 9UBձtut%RNQ ޵qk" l|?w.ncMP5ndɕ$nےkXM8 H~<)W3ZBWX4BH]"]i-I "`-+kZB[fQ.vute4eDWX| =jfhU+DiYGW'HWVwgmZ[u2a= SA !T,2DN1;,E>^%Uو`DTWy4m4֖ f=S~dgrN{`Qt@xnYNN蘚F,X7}hV3s>Ӂ0}j nar v˨,蛏)\'Fw2.[V4/.>[,,fz<vnߍUR9ǐ{^97v W_cG5{'/樆jr=BzH6M?STlgˁv8s$j8A~@0_SE!\@[]쏅Ur SJW<0A^iz9#mzZzRU-@6ֻߤkm6Fgq<gf{,X6?WK4vwjiˁv{JZ f{/ioA0UG/6k}X#if(yؘEӅP)iFtȇʆ>~P2 3*]`+˳+DH Qj 7`I#Rl *%s+@l;]JS6ӡ+a9Al}X#V\JXNn;]JiGW'HW*ڂ#feCW\ ъǮ)ҕu*#]!\s+e-et(mfʐyv8B& lbWVǮ%뼫S+KVKWS5ky3hzm;]!J::ERDֺeS2:`ڏոxvz>!fձ7m9f-%GөJڲMRlM?)oDfDl l jvBvtutŨ"$' [ ]1:BWS|BN8]!`ͳ+{W *fNWRv)ҕ`K]`A7mV۶+"#&P;*LS"NWώ^'DWXk ]\KM.t8 (S+ͅT,##5/"@thhzGWCW]`E6tpE6խ_w(b='1Ɇ xWɶv( +hoj)CD+ʴPѶ mͧ4LҰ;弜]~$K:i29롙v6Ӭ;P]wfxBT^7]/s@ sa};זTª-ATUDo|p5m7\L_("Ru;/R rp*_5wP+Xp Cfdc !,|2)[8IS.r_@~3JB"?T(D~?4 ૕M<|2t)nKRem ;hYΛ+Py xjA~?Mf 5\ꍺuNa:,&strDRITQ!Ӕh@7[i'ya cPj`& ,Yy:߮ j18-f$\E;x?fI5H;Ǎ4PGK Qy "FE+=O`j=2{EcW3~tLmyi0k;dewnKa.|o(*csdt}Ų&vwu>uT&NT;\OgmpmMϡ` /:6[9ѬYwHUA!V=(|pWRPL0 ~l-^.m^PE7Z1I,h>؇/&wP*_eq|75h-v\VY z߃@F 2'wt&/)aFqާ鰼GAR*A͖M\ 5j:*?aϸgc1?Q=`t /97v/HG>Zuޛ-[xa݋'w=#áSBu8:QQǷjj?]-ҭZ'}/zj.aVzSw`nhڰ@ǭ4zEZZ!:2D\YQ,%^a U[ V*mڮ{^n%ƪk2.RQgx8X˯h_H%ȈQ&z :"{sH.U^?ܪ]蛟Ɠzפ#bs}hqn5nĽM|O~p0w[-JqtO#tqCY?l;+5`kեYTQf =fNIrMj[Ջy!4R{S\JA[QtEZ<Ĝ6qO'>=n'S-<ÿ;jSVD"y4,SQ3'(' /}rFcѲ<{ 8.pR$㥶TTBHZcL^ǣ<mzq!_Lxg1+0/w԰2KW PT>eO麨E;j3+/>⎂k^VoBCs1bJjżtb;]!rr]oq?)5B )H 1";G߹ԝ-O ;-K T4E^Ee\J"`ґM&ZRIMHu?`G7A0-d)Rɩ1Gm`6U OD1Ε<CzMOtfjW}|Yߦ%]/isK{Wnxt;,W>`\+ktд OL+4 $H+  d)Uą3^/M`Lj͘ ;Te #`p8N&rESTr呔,_H-*>cd)H a-$(ƒN9vʱS-U2c%p!!J)7${SBk|RL+(YG􉀃zPS>MIɒb6>罥B$J%e;L<=゛ q<\M|0~8vSwW7]O*8zmswzf7y,lN4HYx'W*O_[tIlޥ9+:B\ߙY`PH]@Xޝ=4xH4hwf!, eռrMxE{\KPmouwwP<(Ǎ\fHKr-kjFaiJxlחjnӼv߾v_C%kʭGx\j|X|+*uq)Mu7^q#hZnWRnKZc {Gh#c>HKgۃKe-')mPJ"Q$ᩌK=>Fo')>l(4LjMASHBC ;\&VôDqbXy DbGsD2 L98h d"vx+M7y{[a]gd_PJѥc I UByRQ#IBepG$PxBF3py3M+%W1`i-ybP@2 TÈH'@FɭthbѶ޶m-P6z^Յw_ BqkIBn.r[1l{1Bo-n,?@i<8 Z[gxtjm|W1U%޵q+"˹ k~1PIz'HR)Dl) ^d,Ql/.Er33R)r>I& #4s$5a4'_v KzT U=}?;|ūgg/bݫg z`\hA-۵Pt 4ܢiihi%(SL,#{u*enilm-'zؿO.=ELyWM t%u}{ hURtI*]s+; Q!Vm%w +͵yctM܎lPbPBZKъT(1ZȎ&)97wd3輢>_wqa `Mx{jc;d:X[*Dm'Q9>`'$LOFkVn6d6[ WNl+xo:#¶{-էʭ^K:y/]:;aPn3l&K$BŁ4ֽ'\wcyU:[ @} ;GI;^ȓru!-֘ޣ)I`q$q/ RwVRJ)0A N=YsjgK399p@q$ /%(;[P]Ͼ̟s{s ?F 0k 0 !"F,|KXX(/[GX.o1hڔ[|>_RA6iHͤ\ޯ-t['lRra[ZCD*F!G+ FH.Eyr:N#lD#\N1twE&1JhEbBeRmPQVEk0yU1`"{&CDtƠMpn;Ŝ(ðqSdg]j4|g& yiX%A\PfvO:>my"~;[V{yF|ri1GٕQޞ sҤK.8:UY3{|k6ctNsVGs?¼/E.SzU@ZD;O8Ds29ߝK1Rg<(sw i>N3{U/5i}4ǣ%O{^-ןVo/O> I;\X2gV3(W+_u;oA)'+Q9նL8Un}f[kslGԗJvtrT|{Q0_ĘlX :f20O޲!ҙ~mI0z.ad gHK-lDxʠ_n5&m7I-mْњ*n=vnm'!rw;=טqssX4Br0IM QZQZ)01>m[>%vNvӔi"#PH6U0V,^d(%b !1̬ /*YLf 0*f-@ F ёkl\Cw[M\"K~ }Dy[D,ÑS67HWB7N^  +7‭iU"bK(F[ :)9A 3Cl,U5K\"%ئ+օaց'e*֋b1^#9c.}ցRmZ{fk?Y/OE B- Oe0 :*)%;RA&S^uYwA''V< ќ͔ZymMȟ_0_d{[Z|ιb/X:ξ#FIA񶒈[h`C!h>V+N?9L( )lią.Ȇ| BvJ=!߇do)eʭ*2i82AD0 "D*ku5.7Ց/.V_|TLm dZF-7`Dma &EprfgKEfG&ecPWEPԨLGSu>j&*Wv_ltخ!ЦmKVo^TI]׵\E4\E *> QPXD#޲tnvm!s>t|i&YJ& ˵`A(%17 IhS!b@Xq`_F ۬(!CJb 6Ԗi-91fGMWnͪ^jB gHxBuL Ƅe+A3ɽi9N*cރ=$\$I!2y$5F-4ɬQFYJy>R5,\&Β2W!.g B+M0QXKȧ1buVPj|i\䦽 3 B鱙͈UlFKeV '2Xbi3BElH%1QMKc5q햔+ϫr`c׭F4E@Z)!t* 8b4Z]RЌ香ͱcZb= ADo3`3@L\c:iS^|Rlp?e(R`K ',ye}-}m}5}͢3{8F. 
1Ga|JD.8G &!,F<ܒ"/;PJ%2,"1@|aREbјp787éZ$k}^Q<wy?/+ñ3S<֟$aނ5.S0U)xէ/<):LE~x 2w'~ GZ9M FoRfqXK\IK7ܹ azR90MgX)#LJ\RgvA[ zTt8.USpZVp?U'Ϻi3@f2{^e$~I78?nh^Ts7*ow'I|__*qAYZd|Qf<<<_Oa2 R;:Wa|58̠LMPϤ>coa\X0kkQ kkbYT]AikI5VS#gBt3(JɑՈ{t;xV;V`B< Sֶ30{-#chnDdcB~L|>D`+ BT' )#@!Ci#\iҜ`F:QCRf`'0!0I$*Ű[)r'j6qnL]\)O)Fec6NVeW|YrX}7`LNKNNvm7&.+k*3Lq}ّ˪5BJ3O A8u0309/"x"|@[O5LU3 %ZE" Yo2)GDPR5c6q`͆Ul#cE]HQf];] ܒӓ[BꪛeD>*&_(`02o4|[2gԾSV*C&eJP"a:0- 9rfm~mt^5a %ƫ`Dذ; 4!(TAyc"88f`!-|VAٻm.W8оڸ_4mt73}$$jYRDɎh[%ĊqƖHp@NΥ8 .38AS3~>,HˆC̈eĖi<8('* :i2]<+ößFHHqb dgN>&4(&LR#1sĺWg-44^Ӳs\gmZP^5"iyuϰ!8#Ƙcart͝d{@Q,ܑfCˋ‡iCṖ<(l|.1F?sk0">{ 6X{OڌNJ~lJoŠ~:_j{D3>:ҥEy&aOG3>oyWJn=A f#e>ڦеnϼ?thE+ԥV_uq_y:ke-e`a^!(0A*$bd6xp.V;|zIA컁'ƞ:uapG0oz7gi+4UכFMp)DW"p1n ]E:]Etut \6D1tbht(۹+%zҵ|ҕb7 #+ #[42 J:`9p!:VK"q"`nQ h9p>Q/j2 ZFE'{;2gA66O[R8A?^~aOA>^,|5qv)Lu/- 4d>YAsmTqX'.>mQw &DoXnz9S3Movh鞜P TMc*E ;ۡשۡ#+֪AtE0C6.ۡ}ͻt$tE1w3m~ w ?K?e?KM|@aNqeŗkmbo䅤.~^{4"U _|ou|^Au~UBછ&\oK5(%;Aꋿ Zܺ?f0yr%~*:tS] }?t<),,y>Z-(ML1*ҽ\&>?3yH}<azNP -/w~f1T)b+t@ # t\@S˔[,} *;/Os8"0^tp//]PA?M,ͺnqq?滳P &=_ y6$A|_䓫OƐ.tCA'Iri7~,{~6~Ump}S[zVkb.Ӟ {qV38YWz̴, nR/Ew̶k3> 94S=N@Oo+`n}) )VNsz7j~oZ1dV/n2ʻMvVs-h7Wó&ZP6JAx7ݔ  I֙^/W Id&>ڤ)G`܍O氠nDtüszC l aBybѠ ǝS^+[fq 8 !"˰D`= CUiZcˆ^Fclόbe}0RSFDc ` Xy$R T]u\BFָ\/ŠtH|<]o栏,Sly,ف3{4Nȟ{_jbk™bT I=\qeX0Br(2;EkeGa˥;2&1JP4 iAHJBfŒR*H˘DZ-i}!1: gV46e; Ŕq+ݴiK#姸[Y-; W1fFz^ѝVX[g+d3pZ>4.ܒ7܋}Ev*ߤRmQʴJL7EMPOt_mVJꡰ 6^%oj{H̸`t]@|;'WE\y{ U5/cx报KY _|Yň7_ŸXYMб+6a~O_ƫ߳.l(c>^&￿y.cqh0Ao~ ^}esaTYs@3 FNϻ}?~B'W+K9Kypw*} n<?1/(14!mM&oϻ÷Ywb؋!G/s]ߩMf y xoE QW"1tg,O ֬XѸ_sk08U8E>߾}[HҿͰQjMO[&|dHMmhMWFZ4~[JlOD先8%3k2M"֝z+D3Cg-,Om[!ۙZ^iJ?/ҴpBƎx+^nUL[hr:DE4Սk)eG_W9|p!&35~,LezD2-3eʓgZ{븑_9wL`Y`]A c[ZINY{dYQKԖX0b9[ɪSQ] z:~lj~  ;^$}o?G^7VO_W7~w]xwvM/Vܸ"fV\x*l( W؂=ӳj_۟^il6@VЀx!iJ5Pq\\ZK-ȭ Gj#őW؏Hsv;5MJSdgbyXͫVw_5W=K,Ʊ4L-Xf]6rmq9U>b\|osU,RMCЏN,_#2iIfW2y_ipWVym_r,4,к1l+ 9RC+oy%IOH=!G+g^JĜ'UѲKh4GTj .\=nFnU'$%L g_wꢯhch2\͜y~efis( |iʴMvI\9)2~^ͽ}٥B/cRg)CKY JLg8R5DoS3׈TH.KDCVXd?ZvW̹cR)G`#> ~5gNZrm)ՒA.G|V)l<l!t;;:9X,vz/A |JoN09f#6/Vr5clN?):ڜaHP9F)UDڣ$8Dj`39c Is!3Mpۚ7 =~@vxln ?߼WlkNJ6W6Jظ!LC:N;yc2j5kZ#&V )NU\{D*K֝}2b,2@]4x$f0>W}YU X{EjN+O~G3'Q?'g󕨏^P.L#zd[dWc_a^lnZsYf/v_G{ta }qv/珳0s5?bßbmt}YkϏ+zZK;f1mY"an{Ҍm^Ĝ >nIϋW;=cLH]VuS޵|n;uy!\`Sη_׫}o"pWU޼;2~ɻo??ٖ 8{ub?>?XbgˋhGU$K\cFJ6T6ܺm I̊t$yezr+b_hȽlY_j2ge09D'Ƥٜ^]i4 s%|p8Ο4YJ?)cRJ|\KpA1Z[ .#4ѐ26syX~&-Qo0C=V ozbo-Cm4WkJ)6u mxݰ-ٷ练_Ѫ۪wR+ݘ`ؒ<`~m\#ąK 2}]׏_NrBDqΗξBvlp'+L)ҕ/O$^~9]-ʻ]ˋAǃp󞺮~{^O4%+onOG, q4p?qGm?mǟ4Q[:{t U:1r2_-XgQ T|*}k#TqqaOa_U5&G{kw MRr\sُ2wE`7+ Y`J5=TYc ^V$bvr "ڣ 5G d rJi6Dג>Rc/ m}c=Q|ջִVzA^*:#{^eF6eKҢa`3:r=zvVKcX<6OFt;F0G*`RCٶ52bEpwkt -E3q<sĚ2چJ cFǞ+W5Z\HLţ=3oJm}84"q̍|l7p/e0WIvt4*nwKG4i=Tϒ@,0Vw]{[;g? Wv*jՇ o:j^^XLZ :)'RY&UՋe*HV\G*z FJ}|U0VG೮58кSw['69VSC##$}C['Cl i並klXēOV͋j%eW!7\N, w\3b)E.Y[4ցVmYH!ۭ{H!#vt _\ jiX2)4 YF=:{; Ain/3#Y LKo,SIm6% :ı\5T˒-e] &B2G&GB_70ڳR{eZa0Tk(u1EARZgakjCE Γݬ70(6 oE#؆Zʚp j#6re=zv\a@68p  -AJ;5Xqi::o~P W-`i6Th8L J2z깁 ve>o ݥ`!S b 0kkEGҪfB8ͽcB'00W0rg5%;ܒT[66l 3Q>4Jo ڛhD\ ̑5. «,;xgx)$h@Qu [u REDj ~id0B䕮%T A!Ѿ\ؒGju"F8$55dY ) H\0;E9hAOES*p[&2Й$[|71[{ъ~slp[b4@Tƞy΂C2/GdX s7Ö9fwsZE#%Ҩ[$fhj! .-,Mh6G%M)sPj 56Ue Ӈ'i<r,W5/Rb?  
I 7TDk ܻ}ƲŁ9!^ԀEL|%(z#R[x+(@fH()A^ /oV2 -leP0eQ0tQSC#.g=I:P5Zq@mRWBm~̮+h pN1I"xj+ݻ6 |U?wd \,pyTLr&Ah#hs1[l1 >݅ЪPڀ@B֚Ka,35Hh[΀|1 BXia ip# _PpW՞kHX1֬TVRD|YkȎ `&c(&( ![.H,Z;)Z5D LBH}|&+I%'Mn)_4T '~\=t]=On81 eJT}Paw~=tpU7S3o$~mSh>kf{.^Y?^łTł流bAN| R*b 0L^op9 o9݀\5JWm.}DmcP尩 t*_<}.]\⁥+\R2HGtsj\jk!607D+Epv|MWwfKW 37r{w?{gƑ$2З$@>d%@ -p跑K$MR!)J˘lJUAÚQOMOuuUwVjO:T)J*I)'oBV{m7bց#'@&G :8*]opWLCݳɶ̇o ri82%ɴ|񯠓&8Zچ3^yxX=Yrs6<0Z(yPeZq^@|5ʈ e 30XV˘#7;e&Nsز*la62{sһMg^lMOZxu7 rrs{b+ uzNi#C.`^Ŋ!RKUC,YWA h#4>o(aQFdRT*σ&b;0(5&r#]}@.q6[l7臅Pv68=m6z+4VĆT%$Q"FI`FKxJ 2F4D@Iޓk!NCyJeQ xI#GQB-RQYfg=l31Cc[XeZD4rU0Aӕ'2 Z&VT:P^ʋHTm rY-b18-EO.:lZ]"EvqҪԥ#*H&@ӍiZ{L QRE..=̦CL<+ly[mq3Gn\ ]\LǮsM֍`K0.Κ@C,D#CdK( "̤(Gp6q6~0G^v 9(c-&@1&4L cȘ@2&1 dL cȘ@2&1 dL cȘ@2&1 dL cȘ@2&1 dL cȘ@2&1 dL cȘ@2&1|>&Pl ׈aLhN+r RdLP%ݼl^ V:pz}Hͮǻܾso{74<rcvGLt}I5g?u31 <7 wyLy8)AhCXܐDxOʑҁt )H@JR:ҁt )H@JR:ҁt )H@JR:ҁt )H@JR:ҁt )H@JR:ҁt )Hx>07+8yO ӄ}aM;&@E@\~T$Ii* "0TsvoG]#+ !+ LܥƖ <|{tmr>6%7(8ʋu2CYUpY?Sq33V@xb.(T/S26 D4 @S`2 %tRy(9H3Qӑ2£%$V1x~Ur7~GaKn`N&yos"ڧMa2%( )ODLtAY5[fņ1 %kGAQ(P؏$uavQ|*Q Eu i=0ad*PNZՂuʵ }$?55uH@0'5+mg!2RWJ`8@;_=z};ߐ}c w8e0HFY"T>̢B-a؃B4?{b.c1psV\(;5 ܔT]v5yf*q_=gtS3>~"?N.̮4K]5Ј}dx0:ɴr+T@Z'"#tHNӾp㕻t::^%_BI* ƔU.S:.Wj)O!=W&-IyGjС!(8z{J5՛;M.&Ycs/HF se?e[fnvaTy^OZ'qIWCW?YTW0C=z1YGoٻMֽXr%W=uջ* D`1)1W] dz*-kUdMtuX^.|oxo?z!woo?Wo+߾˷~f&pR$f;xhx09ux!9z]s>>FU6mf1w}oO/W8[O{ffDjĆa~ ƅ6H{R۵0И >vuW)ƨb6FM,kğ{JkBcSTB널$DM8H 8J'JUyלc+3W6C\ux+DobU@|f G3)/PHĨ\ ÙB4ecg'vؐM',6K vl[ |Lv.;[*-{]^W!|Iqd0죄=C|o~:WqR~ HhJUZԤd[#KcC]&\IKR:{l^4mrt6%mK^mN -u>w䎏Fq1Oqm\_ƻX4vk7_L4Ab〜߆Nn ~էoি£I/[WXv77 zpavVWs:Qs:nݷ~-8kw[\-hkdyZ)"i͖R5+(ƗBOޖJ%lKs"J-]:E"1Y{z[z[ Y[e<ɝlnlFhvn'V #&ƃ8sl+K-84 Sw<$JS:LYD%ԩK֤*/GycXQr%P Qέ"s&pJ%~`h+QyVj[='ۇg}lJWw״=34m b?t)CRjgc)+n%J )MebBK\#O߿|7X:8.~2 ЌL_5~m7]K4k|KY2%Bj/ϫyᮞ]Z2]E;afɷzS{Ze"moa3o;)m{~o)لN_}a0ImS=L=y:/$͘nJ;ApA<ϱ mդ\ >y!IP퉬I%MDPu:XDdE =Ui,++ţAP3 rDf h͒KYVCO4KRjf0̤_Y\o{K`xʌ*%.+ga?{ƍ~#5@+|D,)Z9ek%YZYJF⇖K3qGmo&A-\ڝF@)PwDH,7sQi #aA#&6479ZrD`~NQXW,B3P,)nNT`2Ό" $gJVsl5VsN茉 %frA2酳LX*0Ҡ1Q 5|$x>\:\A3)HwɎH4:RDYsfI;$$"=ʓH&F < *C C3s<[a]mI S#DϬJrєkH+<&(ȉGAfFX[6my= D-GGO)bIeA( n`Ǻf.Ϻ9d)--/rDY .=,Ѩs0qX5Q,,KyˋR?^`Jq{p^4Iހ6LÛBa`%|+52J ?I6oM.'yAxӹL&Wa]@H)D0uJA)k<\m7"x }UϦ?@ՀP& -,t'UXMPeꑂ\ sU~NjEaPFlt^ c:pSX.mgd%LZjB2TQ4936& (LUk+RBs|YSUSOfF0Q>xz|'FP錘+7wR=CI`մJcmKn骮RLype3чXeL@6{Wɺi'ZTZAFHj<TuRd?]'wֱRT*'^;ՋYſ鳗/^vL8/$ȽIc/݃{M4-jڛ6 Mkdߦ]dkڽ/>ZYBs Jt~tnߞF)k`qBe3?}1*o򎫪(U)8\U I م5 W#,u:2xXCrPE}늒2j-G+S r.p@vD0I1l͹#c隗|ǃ0Tcv!K9J%V!Zl?)׌A;ar=ړ}k3>O,֦Eg{'%<ٝmcx佖SZ;NK+pUt&/Z zёrP=+ҬR!t[u8}y:,a ;<8ôkP+̥VlTSԸӊȵU}WogQzoJFoFw]Ff& jZJt2cp6HF7g)Oc.*5X@4VZӢ%l\ffw'ջ0]}^;7B+ }(WY~Xݻn˹y= o5H.ѾʞfOcӛ,u};Ӓ;!f\a댿@j9o5rF[ uYZy8Yx="$^'7ogJj[(xn8<tBa0tQ%0,fz@! $p4ܢ}RS$fAƕ-N;&,`57Je!٠LbZ.VAC(eիY󑇴 w(`#FbJ҂cS qQpFOf;29O.H~5T&.4Ɓܵ-z9ׄ4v1u7UlњrZ-ё"ĤTCG){ڷ(k:GX_j3]NOM, r B($0f4j2#,daJfNOsWppZN}8sE{7YejȜe;j;| A=q8%}v=`8yF.0j>NQ$m}^5bHc7ɵSl˵f Dfyʹ\Mv(ch3&<ˆ*c8 qJrId"H"0-;E#l\{H,&Pʍ"6KudXDK;kBD h$ [4Fs X. &֜YYrP@[#7u(F|$a~=h(wTqAaO1>ʨJ|ў῿ߜ(ۉo"$[aEH+P-'Y"apH#' bi \j=9Z PaVK`u#tgf!`SE$GPEnNKcT` :KI}RiBH" Fy@sIoj:\c2&htFzʼ6|7ြWo ^SNM TH*TKr{ĉgš)$TBJ}LFDoAsBqo{(cEHߕKXMQI))%93!@D&ŒE*L<)V(Y^"8%b"`c1g!DðqM{5Ff39z}a5\Y-? 1[K/|~j9ěB@[`*m :R8ăva_MIw6L %;cL+`g6"zJ"WT)BVb ,bX,P쒦߶͑$mp{p9ebFU iHF-wHd!6g[}@_|q̽*kS~ K'?ͫ1 w^e58P^1!2BTĈ'T969PԬaR/13)V2""FQ4`pHat9[,qoM锠 *O߳h8L˭wI]Ri i/E'}}˒[ \l-B8j@-H($sŕaɥ0T@vE8Ȋxbvah#, i()i iA.hŒR*Z e֐ֈ!m`=@ XR4r@fQMyA@^i뫩q5ӬjqL_Cd885$2).e! 
6HX*+ZZx;.=KO|uS`6&iYҹ!fF15F[@R`6 oWFV׍'{-gG^9| %T0h aLGy)'k Jc@ L JI^</uZ &zv3P0l/5' ݯd]Ws/Yē*#Ok:[+#%]rǶyZr[O?W|w_7`i|ܧ Rp|Rl^K?t^~ӬǿggK'?O?d {֛iKwao.{?.zW9靽Zi~k4ɟ,W&|w]&/~|J՞Ә&iru=7zW"ըܘxB|lz{ qY&}/-sSş>.˻X'O }:}ݠyUd]"h篿װ0rӏ~5 b){Ty:^h)(0wmH_җ/7Tڪur岮Mrv+F&RM<$A@ieK`i?tcd!7M=brWMݿ0hk븲9Q4Tfgn_7ȸWOlՠDy hJi\k 1= 1c*#_,|e;Zϳ!T sWI;_4;9v|.u\^?MՇoTo_&M`GH76:>kgSl'Vt|ؼݖmF9ٲc:>=yl/OS1!i8YWZ:+B&!7w;g/4 ȼꞔ统~}Ke_)b3?|7)[J:A+;Wf~1t?&>Gԙo<3G}ÖIlE_u6o}V>߳ p:;ԡ݆}wIO QK&~,aqOgxOXOxV2oDϛ. b/V~*QHF5M{j(eV9h-Dc1 fl[QfeM0| -F/qH1]%ĂE+DNd,5ZX 񠣑 e"X\牖LI#(ATX-F: . \>xb,6Cv}!WCXuiNvҪV+3Ϥ"J_%`&7{?N'YeQemˤJtv5?o־EQ:/q/~zER$oCV<RW[d-DDB7:~ѱ[ }sPe6y_-- nʯV=+|rhq8o?Ϊknv@^;[YȲPWF'ŀDKv7RRE|pK!V Gߛ?:?V8b_,3=ɂ6363 (բT_ȡVZ'P}a5c4rjgA;!tЂ]VJ:I[)h%fSMv&k̛r#22Vq\\%8h>h [^|Wxq9:OQmv=ǑozvSZ=JP Yvn͔r<^DLh$?nÌ׿7fQk,XФ &m4@lK=nn5/=i'rZ^vvUa㕻帎Kͧ_7oI~iΘ~>[P۪u+87% YҵaX]LgW.'TJg?6_zF)}XwZrP oBjߤa< L%Kxȱȱ N鄑I!FRРB3bF؅m0@k=/P씑>F JLʹ4KMA4\`eΌWڈT6}VA$)l%(.>|X \Qi+I8 JX֬斸YGk;-15-SմTO]R=YMKÓ.51ϐ?U.D|Y1A),ZDŽ5ZZƢp*8ē5>ZDS*KGs:uii`g(7NJ`ܽ 3>J͟]֧'vkMPן~,J 3 5e #*J/[|,ՕRê5]߬vl|~\9'0\B.TIgȶ COLC±$4CHf̍R,yuuG{绩0QZׇLm `2ڮdj_m>QLIz9T $BTsd&$GuKh# VK,8ka,$ yJYAȃ1ɒ XRץb1J&!v悷<E,YìN*Gea]9k+!O.|'"[T{%tLڀuYI%D=axc<ڰFNAO:(KD4E!Zá(Ӛt"89 j|?k@5$re8p>Xb|i;]8݆TQ]NAI0{qVEE =9~>9~WO؊{ vJQ y!BH"(RѲtYY%9hM&I5!e(rDҥ=Js?b$,CAb14R 6$*Z A6 OP} hG3 #j UJG:Ê!1l!QA+#D&d?ʅz(jDM7\\НL.]L-CL#\!\Q%*l˞4XQpzpd=+&| Vdw*ԮUVU8D^}^vdxKUv6l 91ˇm(1^W 6ҏIh͜5GݾކxDO6s.lf؄Cb⿛NO/~8$"G V eƖR(0"ZC<\k!2kkcr,-F!ර3^ӜA|ąh.U &ts¥KH09ҝ1#2/g*ո/徬[-yljYCcŀ.SꜶFmkM`˳m3W.JR 1ѲM<ѲeQ91o RPX4!R:PA*KU.FCYz(,Ci 0&'$=h\I&ϵ~Jze2C{SlJFc< $L)mMeDA R۞#$<*̴TĤQ+D5ud`b Q,% Q֡#: !]u<3][T=0s[Δ//J8'8V.v9!$YyX_͞T.OT,4PdDȞ8i|_s3Gy "FIZ59:y@E" yN4*,;-|%qPJl9{ȅ;;Lf@$3s' 4SuZDH>;kBJL26:M I誚UYl 9 rs%F`h)6kM!fjm&"L(Yo:h=^+mkΆ ş4~f{xA(Q3+عՆ>䐭 у&YS|NIyLvU&o}swOi7BN֋j,TC;y--Y7D<0 oՃwTUa`P4Y}VϠ>͉i1>YM+B;&Mܠ0ujnBq摬j>p]]*T|l% i<'e7X6}.1'ŖȴvځJ`Q2P]bQ@; Qݪ0 xǬpd eN& XqK)丒2:J! iU iOz 8:۔WvF|<=UKn}o-vPt #Mw1 >[QS:D|;@3M* NH],gz1?z6Bπ<%ړcs(QŠPfcpr䄗|EIJ*$"TACuq?#!M'ls)<ӜZ2y$YoT ͤo'mbÜW8ehנfvgaayd%} 5 :̦u0Ь5q3j` Oх$G},޷2104 :Y>Ɩc^|pqqⷛN8Ioq|9WF??9t>)o\8!}7pl~Ak֯^ BӋW'&<~w7jr3ne_s>\y^?S/7w}M7B{F%ߕm\ieқqqcZ~_9iP~_~]M9?-7..9{!}_pV^bR)2屿er6+ 'xE::?~a-?]hן-yenx^L/W-܉w_ ǏdJ4ϔos=~wm-G[9No$Y<9[)n'v* 'ƴ61\mWʸ+.Ԡr=.+m-ϾrsUڮ (^q62\׷+1ciH@vp:>VU{VG/LοЇ7;^ͮi]->xɰm`7O-__^ls:VU^_H]IWlve'ȥ}|.kv:M}.([ΖIqTMwgwڦ8!umKѳ#3#]C㪟u|rީdmwK88{Mkx!Y&KPjx_&ocBLܬ1r%.vqo٩k+Yae5Pj"ds3m։Y!lv wNfS))3 |pNad3Jk_n?}~n?*Đ1(L(6' BȶLf&(' B`U"1jZ#S_rBVsBF, )qcUse^ wR+!-?J[eTڢl*``oC/Җ޳ }[ݔWgŻa~m PdC*6?.$M nQ~aHp ~HOX |aKT3 c*@*d g-*^D)KtJQMT3ܹ4uMAH*PCd3^ \W>A@-.Q@7Kfr?m xTҺP l1{t|j̱v}[Y[Ubz404?NedJbM pHՖ>H&JDbu+Ċ)yGJ*n>ZQ8gό2#FQ8Z4r9k#H2,)F \Fk#1%NYSwVj1kbZHKZIuɊQ4>y9_ae2v6] (m7œ>E|UXZ.~T'R}*ujos>Ijh ![lqk&%FFM641gɥ5\PkH]B޹ĎzX-\J8IBS bB1LFKGFZ*tFd7ݯYW1rY^W4gkex*.h,1fxѥ{0^q~HP-YBL=t2A=N/h/y]j瘫]\G L0nL-fMua1H07iXD18#Š} A2E1)>j<(h\r06%HɔŽ0(.Fsu\fx ^c0ujٓn@]*s5B7%fV\# KY,"Gׂ7RՉ ]"$}%Ra9!GIb9Me.VՆ{WT$ց6jɡ\TNNy|z Jo.*MIe],mOmR7PO?s]UT9P nQI${dMZ ^1CPz*2g3b9 -\XB- &4$\ZA&ՆG8S5]VPX,8nYCSbD4I[=SuDR̙tQUOUWW!e^rl~Jj9-uti%Nr-ht 6TUly֭cx:PNx<: 1ɔR'03-pǸNr&:bf] O6|oS 3x%.{ke&\-p1 ;! eIU U⮮gKeُȔw'3췖䍣9& S:^L8 ]Py$O|7Wb(lя+Oq\ m%H#}$ߛ /k1B)w 'yF)ꊣkdtR]ҧExv3]g}ƳepvɉsbNn?Gr%G.陸*:? 
BִFږ\ɺffZbybP(*C Ȼq79=OW2'_j#r{5S7TWZ?FBLM4H"=EHBq.Q y'!ZC=3WVe[^ DJ]H>7hXi<1q$EuLO Fp@M8NNےۦ5q6M!ETnQK]t9z\0m& Y1LI ]{!E:S0&P&sJa, ̕NmBL!j]u*R"TB61:W%f_p?Rv\g!y]T3&H^e0B#t: 1(Wy}sE?8JƻXYQ|.+ȏBH(V\IAYEny?> LK_nW7oy[.P+g+imi̕wB UKIjm) QKRږ@hS֢([E;!u=k4*Z)r+Y:ǚXto0g\?L mӑfF%gf'8r剞(|XPRj"a ܙ  G CnV<Ռ'{NPlNZc[;9a2r8ö[!zHd@rْqˋHdj"J!"OL?ޫG](/}Ѐٙ!1ڈRDJǬ* %ۚUYX`+ ܎,[jhNslMIXʐbA1+(u\&0fuҺ{;_xY m:'z,7dYiwi V]SV$ߕM" XZTĘ/eoX ӥ\NKP1}Gxh0E.>@IYiA>Q:ŷoo-T;$`1&ML."3lT ڊ(5d7IW'~pOsc3.:8P5f\]Sܥ338rY43L"g/'a/ݠ_/ B AP`x)lg~,?Mf1֦lHyyo_ӵi%?+vQrm;MCy6wm)ej+D0J@^kf!&,i$71ɷ99뜯+Khg.|Y;N<fӍ%Һ ͓$& N}Mݢ;D+a`&06oF&5oD$t3&ͨ[ 70ukbvꩠZsUJ@0רs^ a<ỢS#97 ۫/}ESJ{<ԍhFr ࠸`6N[]reHލw+ZDm`8dS2F`8efEii^xeYМ*; bBsi`ܞ"HT gT1/5dD-ڈD$B2Ԋ@Q5FYKǽ!5\"ӉRhCAk^lA# V+NfP%J@*PBQNɓGId6AJzl>er]mA 0vWp5QP$!#20uޏ~8{Y3#&c"Ŋ蔤hVD+(ωH<mv6V"܋Dky[ OBqkrGE>3D,xt:/vbHEsm:hb Rd`4.GτigOYTxkIƾnK#1z"/cONGN%MuJj$YDD#I݆\L')b߮EG-s~pH-Z_47˔^<]K6Yx W [nrv=K/./麛UuYtiWkveȥ}|.RmelɗM,15G1VMgK8tܻh;moqܢ>P=I4״gGV;;2 gN\kwj6nVC/u LR?.mn^ųocӳYEя'󖁖x7`œydM[/D0O C//w7۸ lzQÕJ rId0g12 R3HSq0T#Swu"I\ I؀&`nD$'tfQ~at>B"aƎql+_cfbH< 0<+)+ǬLYFL%JN_r}*M:s/TM`J[%XZ9xշr+p_.5b_ޮDB)LQ Zq;h `zÀW_S:|Jbuvd#,^iH&9=BbU+Ċށ<#' 9R+!ic+gF!sdұp`!rQ*Γ\ΫnepJ^vh%ZY)4JD…) ^wV#g7\YOKj0 m.F sڒ>&8]BQ, )%lg6*׍DzՔB>W CRą և# . ,i 8#RdVqCgK& ơF*r%h53 9&/XNLO\"EUU;P,mOmْ7s]UG 3\'h;q6u DVE3$JFm0ǘgF9H-\Id@һ4ak\$jKj}Y T8@Y(YeYhYxtD낌7Xf[YOf'Ѡqd`Yb#"(x4$Z/R169U!u6QnL0齐M1h" KKI% !evEn/qMԮ=ڇ[A3IyJ3LNT`ߢF#i)oHD`udoҐ H2!ɚX"r 87Y":sa5rvV_X?DD$6'C:SၑZ*)k1ąf,% C) p4I6k2"\䌏>HcrɄ@9C,1-o=jOrq,'[g5.9T.rQ rq8< K 8ݪ`9r-K$D !"qǡ+xA;IvGȅEBޏRy?.2!t~1mN?]~rp1+ nʚujQip{|86D{6.v\f=櫜A{[X5oK'G#Q ˖Eu8[᛽:E_*R9Gkǜ\bZK `Vit٧HeLǏKW=;[hT0J#pKLFJHFmkҍgA TcEjaϋսս^Dj&sbOjBʰ K9Joӄ5 id Z#(S}k}o,fwtw]ȭJU6{s urxәRZ_Z7ۻvtj]l; \(}mݝwyv~$~gyv]nѐq﮷3XM!<4w<4iH"6/kǩJ뫬e!՟b@mGM6ix^~+~kXeD˛ߪ@ A Z<%#IªB'9XW~5)WҴOW\k.sqncӣMiu`3 92p#1SHu="HJW.":MrەxhwMm~);94I5X*%ўTe )7{AS㢄U:MӴH:{/Xa I%4f%"K䮉|I/xW3RALzPeF_J&q2N PԜa!]H-ĸýdQ6Y]\6F{Fl4z6(Ǭׄ᝸Y`'$. s5K'VLi?>I\i1Ki.n-)Ddx>Fe1.]Hm=F\iŖ'O`GÈI͊gV* 7  q%;9E-%fO>$O6ݵoa5.0K%K"t&=N6H-3M8|lmӏt^\X_/\LqFtZ}JG$,qW_+[F"ceɸjEC.xk4 YU~Jat瞺֣Eb^?[3W~h/|w=?x%rE̹m<͓bn5 yMW;I?cF"qH|0b0Ygh!4he`Ų_L'=s|>o9>&7^ѐBuV iFWt}=#GO-:ESM\>jV. )?~ix(+ alO=aIZ񸀮"q!i/guR{TZylE! qy}9zJ&stMDIA)3'=a)vGCsfĹȗmy!h"YFnR7JeAds*:4Kj8vSqMWi#5RJ߀.Ts07x*! `;!A;=knO$4E=bࣱP8O^omDlts$Tl<9(x1P,n ƇIleOE,MGsjW6bjht4ɭ7=ųf%"q{Zd#/UO+5xn#懛퀴^iY?ZRQƷNB@&eLTq 0F%\ hK֚sAGF"ڀ1EH!51wmI x~./vnAZdRe)"C"{[gÚz<#BK'{Qܜ#= R%QmZv`ĉF]C3Myh{G 0yG]Oe0KE2B?)nbyav?ߝM*%~#q4t͗ wф|'$@'$oN%ϓGnI/Z4$Yagz O|:{;^uh6t,IyǢs4t[}u lX^Mct!6AKAk:k\o?L~o3?T/)>sf8߷~~0_<d2{vCm盙79qί^q@;CFWiwPǧXl>KTi\$NNMPۅ7 ?X/>z^^B%h)Hс2MpM)O|HǑTz2\^;# )CNG#aP/}*O0Z+J9nݎ\H .Ht) K: X6_aa2ټoAYЧq 1fbܔʡ7qϝw<ѻGr1%ZsۻpNKL!xxXjhTLbc,9{@<@d.y2Ŵ'd]J*0hGp,F.enCݶ{k}udqj4zq|c9-el]Bl5s<؇n?_okyˏw(>:KFaњ`x)gtbJv`bOud@[A'\#xI*gSa rm6'Ǎ**kp5q6kR?+BWUνX:bBžWHpjRMvH-hdXsV@ˆ2geGwù& ??[ul_'@+fBf~xM_LkR?aÎW%-ϓ??ovɧ;mζyAa:1Zp)%W% 8YOm B vQVk4i'YJ7( x2 g0\/Y|oiϾvq{uIb uݺro?1h(&mͫ8K3MNF%x)DIZu1"Fܨ:f 4I^?=F`&5ͼʲr8 ߒE m1Qtͫ$0hM^ZЩ6e7>/.\-tulF9ӺSZ|tM۶ R?l/.zI֦lOqmYqRX95ʂߴn$ܔ N8(<(;,BJZÁ m̑IǢ +aEy̅Pyu2,ct4.jgP yR i@--.Ekej9,3墻]]/'WA} ޠ)(`2d)g&82'Te9`,B6=\.AO]Sk 3 p!~8hQpa9R-,9 9 ,⳹Fxy=-9@ $g![a1"š\]ȍHG)zu}OՈ^c,7F8/MBGf<* ut&M{MomŔۚQ.k\ƴf/6sS(OH+OHlY rV /oK:͸qNP] ŽE4T@kew^4,OmTg?uuuP|.ڠyΠ i~k )H ƄNWթ{lN)V>o43oCiaXh0/=R;R$XugEbJқ+7LQ&OP#u# `]br7!gɥ\nO)vr4ӈ) d'Y6)\J!jɔ)%l/$!HxIpZl_bI}-R<*b3/C4O%cKt5дP䎞%ƳB1cĜAI"9y$"I ύ'S NWgϼn'RTvUs>ef]aox&t)7./Zu=Fi1.bn[zz$쪬rFNqqUɴN ؘŢn3 1-=q1Wy2fA@MAB4! 
ʬL`{0abߦ1"_r'6bҸ`s\xG}e?NʹkYfKG!24%8u5#>uz-;w;XcGt Fb%y:1:rU<* Sr0_zҵUd-b[#umaa,pkƻFKxtXK7*:X" !^ J`ʍ ,Ȅ79Tlr_eWi&y)Hс2])UlOQY< t]Y$iM' ?M>W?y7^G"o3o#פ4euI]!f9,ſgSz.=*HawX{WH)vp}!5M%bip~ܞ i 86(F:iԩd2\^;#Ȼ9%ps/}*'@6\m,t`Onv+1jbqSX#FǗ=Yr$hZO<ϧGbL"3E`rhc/LVSo:S}әofvS-i;ˇ3w_ U2V DwmX --αd3<8 :k̄׸KIH !bKP.]?=uwʅsct ۬.L|rV*w(>:KFaњ`x.<ْzP2>`bOu]9B:!KR9Ogqc#ʁծ&f ^g}z|3Ug^~uit1ŤBh5aG 3/~j. њbrBiЍgA`@Y+z#SEɜXEMX"J޲L>ia4}􉑃&@PcuBOqآA Vo]AȵG2kW.w$ x ]>\4ήYd.loHW6tgۮ'w~xͻm- L4p;'Xw'86Z?Q=T`G-~H6p6_cN%Щx7;\wt/=?xӣ_7G_ο`&l8kI_M"@ѯ пвazhPN[ QO]M6&y͸Wx>niv\ Ӂ-\u^W(O=aBkUM+| y -2WuQR%v-i>Q!ezNnSTh#[I^$IRȴUk ?Zq@ )\ 0`숼abУjqn$Xk'} Z%u|z䧌CXs+(JB&7Ju3nu9)d:ڕCK-7Qzѻ)9ygD6;\zs j]wubEwwf8LO^&xLrJ͙"ǜ MLl#n ի4֣OSs=\[cJb+ OjRz\TkndmfrUa au^c7޹Kv2Ue"BZq|uA9._ȹv9Ǔڒ"`,rmP&ƙARLZ5hkɹNUiݓ5ī ,0cHL)` A4"@L+B05.JB|>' k itG`}!yeMTlqҾqaeljeoLO<~s7nDF+]nT-:z t8qtT |vb<>Up&M0*f͙">g~(фPhq=y;E\Cy^NyGDw!ʸEIk#l` AT'M(l6hre1`4EĖPƽuH ÌМ C".!Cu7d8w[uay..SYŵyн6AUQ>^ 1v?) Ih(ATqaisFϭ2*Rc16#I~w(oq %[aEH+`%j8)3apHu8r(DZ'̥ia;Z P0w'<HM, qJdƆyX$­Rii !',u&K !!Ê6{^NzUӉ7# " ” E!u.%@Fג)E 0[#R{VmՍV7'm z,j*m- Zy)^Oǟ?&4"gdvYHu*(1H4-j6@`B- Oe0 :*)%;RN'&SيXH>e`h v $nŜt Ƒw#]mnrs|tMiq vs|j+ڷ >J[¸;$`k6!T)iäPIS:#h&.Fo޾is~ ­0V+(vҹ߶7 DoۗjgWJFI {s@$q$rϑ ֞xIKޒ"Y8}i>9h޴q  !"F,|*N= !&Cu"X`T9F@ D"`"RSFD]!a$EV FǍTJݠr>z]InC7Hٯ3|4~ޯmm24Xeɭ.!`KbzĹʰ`Qdw* ZYe+E"$^ظ `xvIj"j FНmdE}sx6X;ڧ|y9J_7y49Z\T=Ә^<9|ۺSSo_M %jҥ)[D/^5=Ú-xa.ӿ}~i ļ-kS˗EK3W_zeFe0=읧gNSʵhRMsņ/-'˗+ރut^"`eC(y)\u+]7X;ٿO_C-Y:`~JrDʄZ8Aer*<@w;×q+nƇhNIx)f,dƃ [DE{]w'_gJ0-J_43( ~Mg ~T]O8:z-}nI/e`EH'&uSYwRhJTtxy j-eȩm|.kvedX{ּBtIrzPwT\W̗ĒX*%،^K'+yL& '??_\c*\& L*f%k$Ȏ }N!]9g&IeJLt>%՗$|P~,{.eNJUn ǝ$?ӛ7aM0M 䵲$NMsìȵZls~cau!$CY21M]Kv^3{.mtv-0;gNl:.g\K^D-W>QTW+B>"q`5h1y, _#Q\ W?R_ˉ(٪Qr^ZNv&K$9{B@Ffw8퀸_wod70Cc][o#DZ+ľ$H^9#@  )?ËPՒU⌺kf!G>00\(($ai0$iZWI){ 0m3%="{WLI/pUpU]WDW \q \i_u}>\)%•(# H`1a_Hk{OIJ=Uq>5 \qUuHi]•a]ߧ?9=}LsqVΌ?leҡ?n0.{'>-]oz6;>oks&44H_i ^ X_~޻v>R ַ[>VV;'LR+eQ2E F.r b0LBHz2\ UݲdN2ZKBdcJ t;=F98_DjY,4 PTIƁ45OJɫe|([[:/VCoguHIIБsMkŀR[3:%+QErfrJ}p\JdDzjRxRzgBcKRЈh!ʶRM-{wAd)[ T>53n&aXtŜIV?K&MA.u^gzXCxH!9%ϽXRLeStYE[KʝUa5꓍уv`a="w"4x$&HReGUD+$יTs'yrF$f"?k`+2v]h/^\=ve|H!!0k]#,V̾{Hz^Kk,-Y۠XYB)!$]K&glBպoνEh|w5wT렽K>4go]k')>׸8'L0&5֩1ЬQ&FBԍwțl̐a6 e+7a3 Ǽs 3/&sBĐd6;Ȉ EmTU_Sr)vd༷L8yD392Y5z+ْn߁[߻;)ğ@'&CeR69o\C2 98HU 5I@KO{ȗH*A_)`br<0BOF\(adګʆZM 5_oǥ}k7p_(5=ٵ(\wS^\O]P6E%lSi=~bHmÀR;[l<ceCZReId6B: %ydVE3D声d*2j9=h Z$'Ix3Y:LQ:)2Tmd&L|jXXM3vBXCW-/gloX]Ohxv;Gl(205PD@ J6r`Q xݝt,QȞ\BckX ̥MRNZ<~:k/]M;zڻvPxD^Ҁ8ˀ8lR}(d!+A Yit$1—ĢHHXKM#a}|̊\)" y[R|SZ/XM?vEDVـ"nxY2 @Tp"k1ĸHFq<|\fd4ffEWEDkFI8))(y.8F& E2!C&vمj<_O=jO8oOOЧXg5-Ue\.I%se}I@0jHӭ F/Dz@#>;IlQ_մcW ܋}6ooh7@n nF5Fw?.2!t64_.fV9mȯ t7G(5:5n?l{|<({ig߫{\YWwb^j:1 s8Mit's¿Y| {0׹Un<_ɮG,)oK{})|#U9|58Z㽐1%2Z א{`@ }D.gb =~I\{A/w &#ak&ɡ(QD]/偔tߢKV Z XEoe#sMZBZZ\Q3 ݋l1˜:2!gsNE$1/|*Q_WtuJB7O5F37kJî&ٗ+I}cp14G @h}iJZ~EJ>+~ @5Sz.m^A)H~ELgI\prx#wQ m8q(RlFJl;T+畊Κ(2Aоtts)UB6hKPwP8ޝ+B_BܿbElC_ظ˓9[O]vWǜG%X坍XY.7=>,{!b`ʥ2%$3f&pc#j'FUq ~tzQ} MVmݼbM L3)_X׉ϊ+Qk5*8t!hPIcsc_chV+ L@Qx+X%M>ifHFԡ*fX=K鹕ԎY2ЋUxxQ4|ImGhe/fv@bߐ݋ntuf&s:f; Йtt{|s/^m1ڙG7unyߪ-\ r-zMwzC3j./yM;:]\9ݏ/Fl߈syFK*H,|s:Ԗ9A<S74NZ0%$o $KwP3TG g'jh*$EqFPsp5s Rk>eBdDjXI ;B%W-B]8[R]3/my*CCKJ"t9۹O- r:=>x,FtW謆5rp404)$uR{ 2$U Lr XZi5]V],,=YJ#*e ) NHRk1$HdO%,~<=["=N4۷v:-cFzzT*+&E鍔A*gy^Ҳ)}B"=_u>SMv= Ҵ!)чYJ «Ui'i.ꠣR8`Hu iq6p/3&K-8ށHAdeGk"X,fjq$ԯ&Y4pq`)r4Fj|!jRBh%p.GӯE"$1/b?ޅ/b sҟ>lׇ6b+<rW<#bQK&} 4+qYٶ/!2P20Q D8pY:KtjűFjY9X\HޥKw&=,6ZčtCᨍ}Ne1yuw>u1őM6Bu%]] $DwwZ$pF#(*Vgє_Pz¬."|EkI)~qtJ#5-O/5 Ss~W'>;gX s~Od~okRWmO'ni蘉SoIg:뚆wO#sue 1ɴ(.|:9^͜妋Saˋ욵Z!_:+2""f_yT"쀍aPM:`(OHxݧ|]o}w?1n>яK\@ Al,xO?~IxJeGyrj+$GHl2y&x6 !ZzG*rVp[1CtUyzJ[vFtۛxO/ʓ{Ij='c"Ӏa,x4Dh^ZiCrOzz~cСcbH 
K4ٻ6,W`?zzȇ$;` &.aS֘"i>d;Aj6)AI$bض@QbS޾+8qޛ5NQ(\ggqS6'vG |^|ק;w!ag[N;Uq\wT~ם堄 {>[|S/VWqV| Ý$Rp]!!cB.O'fCe]9. .5_mH$6$?4q`;c$:OZ*n̛2>)cuu{p|l@~4] Fy7vy;Br67MCÏ&cOP󗟅 &5Yo&-mpuF{#]Ͷ$i!V&&irbJ ,!m) Q2օ -P!*NY'm#m9W}h$M=?x-<4'`VQQƐ̀!!frb,W8sj:sd8OnʰѠH^QǃBܖi\eqZ2_ϗ6QFbG7N::t$)?@5[hQ6U /XPRj"Q3At(tJDB9RB%pc)tz,Qwg 0Bm6I7s C֗Yf>^8Wu%'w$QO1BaL+1czӁ5(@E(p+C9* hIhPld t2q`ʩ27K)4 .JrZ%I,hN؝//oM8L8yΉdD-ڈD$B2ԊH j] ദ(kux鸷9%{HAic ׄHtW!蠵Kvt\,ʳ"%{ORɓG&M2F ^bCQIZtb9bAձ ]a4; J•DAc0bP=qpVDEiLDIW fExs}Y38#&c"Ŋ蔤hVD+(ωH<mv6y.WӸ#@ j%0NyIe%c7䢦sq:9K"ΐV!mk1~g5.Cz5EIxZ_<݆, ;çqѢ[K<4f;>#A:'bQZ\LYgu\ihYk"d!tԳz:yq"6\Nj$D队RapnYHEB2U` Qodyc}XoS:, =j+n=Ț:HMS0JR_\} h+8x Wε<-gE24_.Ty4d41ܿ_w>.my|Fs΍huk'<8æ}}#I0ڱ"x)  /p0)kۻޛW ~x1X'KE}mO F@@B /p9q;&oA|Rށ2(m8w\6*[7n?ݩ]%_ZIQVݚNˣ8zן"*z-T ޶aw^ =֮cG uX-Xφ\|e[La wOyĽP9^>T|{dwYFV@Nbyq?ŬFq^ R~uve>> jVK鮗Uu]O"7%gbRiRk ~%)4JA\ԅpɽ_(6`z`Lܘ)} REUiqgf#p=]Nb!]2܆Tz|ztlPq8znNg xQm*{dvհͫxUVPޅwߐmHB $4Ypdp3{[S5j&L7{ j!E"R )8yzQtKK.YP&s, v]l  .ZdZ0F1-dŴ̼i!Բ3- ӂΐ$QǥZB t5ڣ(UgTOeێv|3h]!`p>ʃQ2 '6UlXk BWht(aT;:gUV˕U[#]ekOJWV] ]UL ?Z]!\EZCW MRN$#U 푮2C+UFM Q*B;:A©GElWpm ]e`Fttu:tj[ٗ4DGBƑ xS;]"]Գ1KWs*Ik*iB4%)ҕ"b޸Ԍ3y&ŕT}bV5;Vp9n=Moӆ)t M:T0 EtY*-t˛;::AbB&  BJ5D7@UHttutŁ(0-+>CVκ 2erxe04+V2\CBWV:(iGWHWJq"BFG( -t(T] ]I2S;Fĭ YEWMRwS+%%Et+pȀh)k= ҕV(Ԏh B{VUF:(*m{,QOę2 Vw5J&[CW(FBo(QWhiZ\[Nͨ'm6CzRzRw懻Ys4Ԗ)pԶL'ՎJCێ,m_0=DF%ֲ-uVoP),! ŦmhaA7H!_M~ _~2R_U`B"1hj|n8Uu&8Wעѯe.(3<-'tq_q'cԤ<ᦃWx #uޕ=CYy2r1k`jy覦HL_/o&: W;jB$n17Wmq\q}L~rQ_[bCbFwmH_i`/ &X`vb,9jɎg1[deeY%OdEv}duM$%ȓЁР=G5-]U4N PD<R50?1N4P21CGہ, } {KDy%,wVz=S̴zK?aͨG/&{ZpOF'ͣ8W)dVyn[Q:qNyмzb`Juq9}{Vnt6- x~Z(B4cI7nqB]~tN%4-f ! |aZvD9LQ:5Ŋwhl9P(f,6S#… &)+7Ձ hXWyp ǿK@rx$qMeZZ~_  5ʋ c/X92xz߲*py8̢2^=hZrCcv1b]fJi ~AayN*Ro--I[SRf:rڋ [ Zk5cE*P!긠6U"2*p<(}o2*S{2H8ehz `FYtȌRNCTTqdQRFmؚ8-c{X5[ؚfli 9iڃ-ܩ-J6gd|X:1/%"v-0;P^op떣bk(D!I㵱p %('$)# GexBӨdU Y#=3βJ%Q3uBn%1aAX$ib&z8C1j{bV`h"X-1웉%e.h+gKCM׳O:$':ÆQªQr{$q >rHLϘ艠<;]Nv{{sѼ'{ʳOYp3W '.J/ޅb8vi?),{g/`G7A0-d85>K-L%9D1%bb:Mt~ïҬO&9Oe6M-#ʿմKh\(GuhR!LʈۼDwCDrT68Ŗ䙫k}7`2ldVy" u 3\nδMv|"!gGx@ UByH`IB,jG@R'w7T:>(Bo\Am4tpYiLkS@4: 'P$!5R#<Pl ;+B }dp`Dd›D"wګ6$Y$`O$CzMI\DsN 8xN1. *@Ri+ Z0s3҂U%cwD&2? ͢1c8dXЃOϯLW=~d9”*!?b4V^  Y.|ə`(/\e#A2erŽ.g8<s߻>*GhϏg&mb5Ԭ.A> .iUFɩ;NHY?B-Qi; ֗m\ʐ4=j򯦼/g zQ(V9)nӰCCp3mq8ˎ(UiП:smjrwyy Y-ST^Gg̓70˧œo~u?}M,ߏG3ןvn!Kje krqM˪a˫o]^Xͬ|ā?l`<IG7uvϻUZ'\V벶j|Zka2Fr/a}l\J|LhgP߲cu(TN.ۯ;{q7?1~~{Ǐ78 {`'"Ӄ0ܩ޼GպI89mP%mMm6yIwtZoEha:VOon뉛d7[nfMd Nb0?FMT%®/ʄe Upzҹ0+9<Ob;Iߝ'iRgH~0 1/"*P؉D'48*! WT}\t\(1"qg[&R5IHV𐤠a_}F}9њNlycvsooݝ?@j[N< `ui\w IӺ}É?؏g8䇃 <6wu`k9EZmzi!]&#"|4 GÿomTDɨhRyoe@fcP\(^lNoع:~gy3B u:oc'WV߽=/{Kp_h^v߿u [>c:oSkw[Lsnw'z"4QbgHkr|A!VPl(yqX"fN/eY zO8t +h}NsgD{ {?A cpm] W/`9˫'o]q+=Xps6CVm<+#w?ganƮwK-yG 7y9-tr壏%;FM\xbrnbE a$ʘ%B2X*7(K.8d3 ՚1w8R m_ښ8GB`0JNOӞ:Us,Y2 Rͭbh9F&J!UW+mT'A0pԁ92@xbDPJ!C;X4Z[ruAjS#?P21C7D/(18-(K$F)RrhYiT˸5q6d͕)klNVefLknf͋nB!WVb.ER?!e .0ws._)tgM_ݛU7/vU)&O JɹZ2[^$uM7yd|q_]Sp]nVv;q親9i$;9v14AW͟sXy7 }/Kh4H-qD{`.8Spm)'9grܣ%eYBJUiN3ƭ"٣VZ; +PS6'vgOZ%5|Z`[g,HP'NX0V$N1&KDb"14X!HP7YcXgέ_dw qQ<Φ*Iegv WY1MjDɞKo"m6%Q:"L -)G{MF&b^Mήd4΢ ()5E 6jy;St.SIif=kX2J8ԙ;jȽ 1=f#emʊ"+׋Wt<kr0zo dh1׿H u!qa@ s74 &j3k|GN BWTVH3]VyNuѵPTj7Ua ;uw"xPݴ[Ѥ:IU<]xvo/~)YYmC@NK ?ʎhJNk?z|&G+ ANHG鲱| U!Y{:F~'闝ip]v2ٝK_dORxuѧ/B~'0F9#e $x(2>H?Z;r6qHH { }x2xwVoZr5][/43ʿO e~s~> Su=bZthprW˗+FǥǖY[;l泥 1nQIWv6iz1T19^|&mqY;Y r1vozGXA?}WntrLN>Pwɛ~9_N󿽨'7uܟ-.?~"OԘ'g3*%_"9Tt\M['^eJq߫|O3KWWgiדߢP!9. R:trAiggӓɷW;of(myO=l=vqا4n_$qE#w|lld_M^>7,12='׏ ?WBV~s"O.ˋy ӥ'lz7e;X+S|w}sI>k:! 
zAn^DdMFzSKO=_nC>O~O?2{~!-IC'TˍY%qo^s'.ܚ} Z]ĒV qܠ{Gפu'ցeqLVI6T{gJGv]v٘W}t+9]䓍ZBZ%ۧse~9yƃ*KF] D=LyR Q0MR,->e= km,*F@#79wSS pH|=i__K_>?ρLpn_9OnGZ&t`?Ay^==Ѿꃴo#L9 C=x`Uu.*uI # G5;fXeҩ=eدO-}fEIP\] )  b,`eYTx޶@Vjix{GtT׮{ԋ`|p0sWH^ Lz*V Z- Wʎ酏008 K)Ut+jWUWG+hbָfpr_ZCUUuuB pł UkObZ ޺*qu2 6L\KaRcĕ4*U3rȡU:iF\!ΨpduiWU.VpU=t\UnWFm nx_lzE-f'o67N%<[3Tk|5X?$S{v"݉r'(B+I;Ynx&$6NJ4#W4+P'X ڧ6]ҍ1 4+lA5+z[xTdG\! +'ZUU1UU9G#+ hj!lWU.Q+j:J;NWҮ!\`ڙ TBVpj ޺b8(qU+\UR{Uko]#R#W--`VF+GOTbRKG+B ؛Q* YB;L)(tYPl0}_jWg@cR.sżgykNtOgY\<_?_[%A#Ҩ1bwyq>-{T6 \8~NNGľ*;b>p^?]wR@,u>ݡ`}Z5BV~ ~ Zr1jx}v~[%Xat/quI?ۯ!CʪCs*O)kS7WS]|=y gϏjܱ>ާ7HQOb3:&\xSN::75 AqEtfWuWB4Y1ƚ}D>x_T2ZKQ.Pp< U0~\x "d'}k7+BavS9K zIl@płA*\Z2n踪*quRȴ+UK\UMWG+n`eCbL3b$M+jrhg$F\Whlj+fpUf+V RWU%cђułc]r+V bkW툫cĕBؖAfprnf2j t> #W| X*W63XՂ:JquָE%iծ+[Z'ȍ:B\9y?wN<ᷡxfQevڡ 4 $=tڿ;pJ%F/tϪgi= bIM:Ўn*#J;F\=sXWU ZeF\#@(!\` CGMuCC 6[PaT3b`m+bt-T9ZWLj+J4+ U Z=`U9#k!\UV6+T3UUʡ9p ت`L \UUq*8nru3;UR{U䈫cl W]]oIv+LUuo} $byHv^i EEfs(lg- $oU{9u4\!o_{bpDyW`KV& W#K11?~==;#' mw-\|/_NBER%P{wȬݯ=x?||]Й;~!=tv.iYD:@vLgfDslpa}VHt~|r )Opp>O-G%}xO>./{ukTܫhVJ^˯];:A/K'4GFm~Lò/||1;=yq`O׿*;D\"iSmX~&=d;ɱ%á i&ȎTn:BҮDBmNuH1!?u}e{uAs~o0Pw7rr']=frLL &luRx|b]m ֙;5hrʬC#UcSS!R*rXc}ʹRtUsT'X;&[[CӓО;R+UEB57Q=3dRWY-Z0Ę3-GPD-o- 5Atlb(ׂňia軦^u9EJfqbjH\o^RR֖fns56-omSSLjDj=t_Y34"r1 Fǚ B% ?"{2Z#9Y{[ M1tca'?n !LEv(ιc0 %C'BȥaQUR=^bØ- oTh}GJGfYWUtm/Ri >MXnj,@ɘu!g5>}j/j35ԺVTC+)IWz9K!7}kt KavnE;IFBPhhM?aAS‚\* PmdA!pF!j36zm\$FUy6Xԓv틅JtQ]|¸]'_c 1!Ku6X XtfݠtGlOMCw_KͺoG LcmTςji.`ER.,ޢ@X^ XTl`t[w \ u9u<y/LV-XrպJ|X`,)ϤKgǚڨ(E!(h0Q 2ֺz6b>s%ps L- U e n6Xe=Ҳ9 k9qo:P6PB ׎]S`P|%'S\x_b ~`; X-VRԐլ 261ِ@&44:n|$K\2X( Źtҝ2O%e:*UP숳f2hYFO-V0!ЮHVv\֕ћAޠB+X70c:omB%%۲mPDfEvcŨ3| ݚXXeq`i/! OӗMe :[*cx (n^upG炅2YЩ֏oT>/\EYxҶAMRF(NEk"}r_}Tw}4SMFS 4"1w`ڧ4yЋEuD!%:.h 9t,'`険5c0 l3"=ыv AnK)A2vܱ"-tC"+P(vABrF,V,u$=`9$qR.Z`*g0`BZش =A+ׅipf PH՞Fғ0 JrlCɵa+V ր' JC` Es6Ք rG(0r(;f6iM.Ad`yĢ&jAH_ m*?!u+ "S>S`j?_or׷{qzo ޖ>yĶ)%F{ӷ?t'$4Fw/co>b=o[K9Y]o.l~HϯwM鴶`Aƛnos'\ox ms$4/c[jVN۳ݷ]w`dΪxrݗEf?}ۮ~Ignn㧶@פ )OX#\A){L_ 1|-[aWEkҊq{tE $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@n@Qyf#`{<ảqh-FQ{tML@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N?ȠcrP'{ h3/0-\\uٱ u\J%3z+E)  P jy\JSMW 3 P ժ·QW=ĕa\bW XtB(&3>7\9vG\mRrQ dbW@%#Wĕw|: xވ!g[;Aۧ4 ZܙCY={;QċesSR9Qɪ-6leiR`\)ҨΥ9`@[ k$ V'L8fjOuk3c7AhSOJeBB&+kTtWq)MW P P.' [WRg\W[.uBc]\RռBd\WB0jDBLdprHWtD6[W}ĕjW tA{Ԟjf*M+HBRpD+P+::㪇P=)KW(Xdpr5OWҮ Tj'2WTg@"TTpj)3ȸ!44k P ZF+T)rJnͺqR"f$P(I,b T{`(F1H0=&6SOk\3cO-zq 6\QNp2BvWke\{6,X:BV+P+:Pe׶ϸz\q, k |&"SB*[W}ĕʈbW  P. ղ;RUq%U/BJ&+P]Dd\WpiXBJ \SHմB[bg\W%1)ŮK*h2BR+TEq*MW¿EBB:yW($ Z U#zkUǦ҂#GWX<dkUh/wVKѺxtNH3ꌉDtNhTr\]JPxJ 6:\\kR'5J.2z+pU 5 tbW(TpjOupC[[he\W$ބpU: 4\ZygTnq\iF-c Pf[BZ+Tk;o]iW=ĕabW i @.' 
2u\JqG\Yfwo(زdprU:BtWce\4nS9lP TJ7N5?C3ć*7k5SOdU6SٱZ(mi1Ԣ\X咜z frπoV U*qC\1NZJB*\\uj5u*͸!֦ r RSBg\WBKŕtbW(Tpj:PUq%/ڜ'+Ku*B':JcW}ĕԞ~u: 58Jg?x\JqG\i,OW J P u\J>HLJ ~bm dbWV Uꌫ>*YJPMǺ& >[x\JqK\iKI7ϓ nF Ui ͏0SeFRʩEe2M>(1UY(+5́Hרͩ;s!R͊mm3ߚ2:r'5qjKu>^NoWzu[n2"AYS+Dxʩ@Q9o\^Ϧ>b~r=e}n8f{A5b7~]͵-} v)b?]qSіWӊmڤT.̆P.Bcq0'xA+ɹVcŃQGdqU5h6v0u YoΦUq#_1FrjL ]4RdiKjU |IrAH z|?/a8q[c/f-2/[,;hmݽw~ד t_~#؛2 ].nqIlqyuVG,˸;syժvg_|v'w)ݸU;?g]qe~Qꢀ.>+;SZN /nwۗ/o:U l|zslHRnk .*sD˫75}n]%cedb$^ sXF ;ftS3g16t~PfvGskiy2T L:TI jL  /t׋R2DŽvj7qvQ} ~Ď@-C#m$!uN'A5߸ rb &~K}{h!!n-qi^|eI](Ctǚ1p#,kg -MimȮwRvǸ{F[eƞV86O75W^U\2b D%S[ n #%4IRjǂTSتI/4LjMڿ.4.Rx4˨Bdj|8T Z7[y{.M;dvD%e ex۲[_.G&鷫uȬU0%gT@𵫬QFJc:aR'BKb"19 tuBZ3!,uQ-74m u <͍KE9B9W@@**II1wB^I+d%KS*+~\Tn,X{i{ 7j4L+'Qq6 =SQjɅ,BDk%R̐Í5>E.x5*lqdǣwE]^L W!n ?C-{9]^*'96|EGVH=#qgr`{#bkgsG9:LwԞ<ʼ#y`$<=ܸtMy#דYUe]p5~Zx:Kl$ԁK >ڠ <"XyQ̙}Vŝr߄Mqk^9>S~|<~#?w /~edqǼ@ճܭq[rh3e+P휅-ޚi e- j]iҕfD&gf{oM;rfƈg%f(Sx9si ٔ:Ɣ{Stx9kVZE)5qRP8J h+ɲ4*P4 i5LD*C:V$Ѐ별a_A3%sS)"TNPWD|8Hn'Fo'7P`ga֓7p^؝Û/_zڜMe0igBڊRxzPJs_Z'q*Fa 3x[i+Khlz lw߾3,-CsXw<moNHVkgYgZ凫$7\ RzU$PH%82ƼpUUQ. ؇*y8iw lJĊSCtrT}bN Jbe59NYyWd*맙1[sW}OIzqz3e֣͗\50e-Z=!6[*y RiNiKNE[k :YuyU32NPڲ;gqʫ8Sb|4Tp^f "Hk%6 =8Z`/?Tӟ1ݩ;OfۣLh0YRH#)%wk4q1Q9|*usmҔ9xyًL/3GWW 3" &!A-zFR>R y\w/f8NL# q-ɗFmt.imKNʳJZV22V hwrfg}X{GC(j۳7o %{ c^0 Pqcwo񧬱m.9xGS;OVF|̯:]]&yCap[ a2_no߂x0Cub/fr;u !g5* ]VӋBV>ShputRωzf{IɷZE}΁%юgJ*_MbI(>9fʴs /~qN"kuW|>9N3rylj]?G߯ߟ1E_p|ya:]Lu[7YE5T $I_v>݋6fGklwf #OmT[mod%2v]ʊ/2Hk+Q-n<CaN)*\5b1Y!bf\ڒK<-ś|8:-^. Ӌɇ䣴?asT=o6/ow%]!(튟Nm+u6 }Zw1N+PyPb&d4t+xNWOl>eOŒ^_L>@=+.>?ptgQnI/UT`3/MOP 7l>J0tg+ɳ[=n;ЌǓwrS\*kUdKd#y@dM{7 QRDt[UC:iL;Tp68[΅Z Tk4yBG /3Q f%1!zL8PyzӦhq17h_TZ\Ub:nP8UԎK: {w]:=?)k؍oէ E9}W/ hu-ZMjf}*l4N,9-J-AƒEiL_RB>V5 ,U,:c7^9cptS"$r9j0 AhR:0#țr 8 ȹVdXDcP)J)!RMg7+$,?}X`Ԟ)PeǗ)S2Lib*O$B;;Asɯ#*e:UdLkAQEI 0ă^bD,őAry+I$] Ї|]ۛ7Y\(r(L<H@rMTaN)gaun-Yu5'sQ9HɬC1Ryp@=v}슑fǮ#h~^*UqƬOco%W> yQZ#Iv=`=R6>d5jJ}$j3yZGYI*on A4HY!x0k5f,`Z'iDk45[!-uln߁ڛ_d Hf"O8\46VY#@!Ci#\iҜ`Fʨ! pGE3OMCaI$*Ű[)r5tv3jV'P$ñMN WCw.%s8xQLwJU>dsdL$v#wSga OP& fhQ(0хT=rMgĶ_]٨#R<}Q4X^=H#hoh%dӀCPVDAyD.qVq!'>h k|@NR2 .#U @ud٦[V=)M+1}*qf{Km- vuĉ ;k>耉@((N@Q%v)=2V#mVFHH8pFBb%#1S,Ē iK.wl-8Ƃ\IdF%EY.^.rq3l&,^PSTHjT@eEJ C/!"QǡPfE>Q.S;1x3-펐E\DޏX wӆ=x>L n(:#%蕮d&X/JΣ@ | snWe'yrǃ<nQĥ>1+> b\)1 ;ZJŝ7v6o|0u9ӱYE(,UHoE5^[ }s@7×SiK0X`"k"ՐX&HSEu2"!FMt[9x$ɹt|g= C{#m maPL>Z5gQhLn.5T2 ȅ/.2 Ke,!]Znu O62beP=yH*6z,r{QZetW<ҷ ZGK!zƌƁZ1b@3WL#,8ZI^de2T3/Ae(nzN0򕔷M.Oa\̡_$?CeO>)߆Ed|rӫ͛x:f>}FmN0yOz# )^?OiV{$kwM*;5˶Sz-jz3_b܎%^eA&~&[W-GZ~`_con+F=i17/=`7MaJOCIoS c3]=l$ ]u vs4egd۫ݟ 2h*_"ͭў(8Ʌ4KQ( z|j̞r+CQ߫ LtU|w O˺a9dz.lG2ݾՀгm"ݶF{$%Fa+=Ck[uny+-l1p]nEG]yI}n;YSk}+oyL~/>'OBn=6*lQKlHwi1J|0d\b1M!^s yx*l f"R ?AM6a> 0שb4]#W/6[";6ad+ΰa|QL 3Kw)lp t,g+8:8AY\Bl9ܳTqh6Zܷԋ\gmlTMvzZ^6"1QTJ1 Y&t{F@cxiP)@ 5V 7_u,Ogni#?{WǍJxG{~71`1ҬG=9,+A+v#i乤 d#Y.p? 9@ aqXFC 4@2ܦX E(5+ʺ눿G. YŘch5H+<&(ȉGA1=qFV"µTS:my=́_oy SrRgFʥps X_E*k fJZ&*%A\zHhT9Z8Т>.e!.[ɨ?GR`3;|b |&EH@sп.c4!a&jkܧPR{eV^ 5!Kxoo㌠v|rPrzԣaH1dD0u.S1Qh $x =OV85!*W(p tHםK5yPTI$8e@S$i/A~7״NOOWRE%BZҴz i,}7E7m>O`L'a T)9L~̖4}:O2Lsuƫrzn#(t|׽E$ jOq;Њp2ޚUՐ0E=z1w3ѢUwPpFuU[|^(79CbNpgjAUJ@d◪nQ\wp ??s7W|Woޟc` 8 l%3wTOT-U57&5hv:}6yEwGK DWt\r6 /fE>W6U&enZDPUܕR% !*^g]HF~yT?=w?I B S! $;"oQDYqn$ޑOzÌR{~;Ơ`m|庎[>vkuN{屶EU۴kF] ux28&[V_Ckn7s{ yxoF=UNÃ4$P-WyO+"^}V0#)6gF\s6l d*? 
R!c֑LÎc9f¬w؞;.nyeGdoZsݴl3ljb͗d6K,R<ػ1wxd?f,U2LK^^O<;ʦظO^oK0&˕Ād9)!-ō۟po vr֯rE긊N\[xUC뭉8I+Leunr .Q[ Sړ`BlZAx˨=kJV{(Na2AI>@RѶ8%_Dz5j ONj (xgS LR֜yLw95mF:}&OﬢTkE0_RHT]S]ey=r2r ڧGHxH>%%<he@PfcIlGC-RuYl%iRKlhz5vc)5I<}A1\K*>31qi4;(~ m@62a֦̋tMU!$&Nދا[n#7lNYw7St!Y:"ڝ σ*-+34|4 7͏a[\A&8ߕa8ey?!?I"vSmW>vSmWT|||sJZ,dQ0 2rY(!1UdTl*qAթFz "j=W|7|8d룔nހ@- ԧ5 H_GERnyyurPHAAX69(\sH*DηC8cXW47 AUoXD[)*3,1)QE$<[N,3vieYވ}Pވ}xYBxV*ӧ}.4,\q=1d ߾ЄQ`(fnLS *`Xǁ@P2B ٝyZuaBbq!.Lf8>E<4 d9Bh\8fzTVdLSbI00jiitЮ܆=,' $r Ħ ì, $gvY59~ ƺwQBwL_m^YǑwmqH~Y,abq<,`]$, (ej2-%/6N%-(ӗWźXTŒS)p͹{r`I>V| ޝ-YuE4R^O!5$?w7O֔;ݧo_ݼr#q{Ûw_Qisyju͌?\\KyJi;EZ5y{_?\^hkDVjDVǜwϼq3k4,[|}Oqh8c>|ꄭ`ˆPÃ_A)n!؋ &}ijijijij!)nm(1R"JI[QQl`8H6TZ TR&þqMqOhR{:-xhh# qJEtx@HQ`9`5Z;)?%~7;R8j YЩrO3KY`Z+#!h5^و~,0%Jo`'>xcN~2f fSW4gmJ-HsX~ûg 4.XwV`ݢ9Q՗y'S]D'c4D50fcڱQ-`gRytMEbnXv;m uCH˫h_2T"XRhB Qr#XD?Rɡd$[UTooCZMYUJIWC r֧[KɈ2ݓRy&a-ξ)d/*kz qe$2#^;fYζ9lugӣصL+s ZWR;ߧnČ ^<5ؕ=٨55r1vIRu$._|H.0bg(SSTY0hFCɌ#N'jTJ Jzr@), AJ`󪟑1X@Ddf2t2$[dOp-qfbԥ9} D|00\$J**$~DuO3Zyb4XXde!: ZUU`A"#D)Y(U`EfpݞY$ x'f*d32kͤEc0DM),)c9`%2GspJeq"f fF\UVMǤ\ j$Td1Jyp'ft$S\6r(\D!x+6Ҋ/M5XtP';|z 'z4.:5(6:Q=pn9{G? l*5eZ9`F= aVd76KPu"WX j;&TAx@5 u%RF( ao [Ko~Wܤ״o n\n庝Պ ۙ8w&~Sg}m l%Auثh;.VWoܝ8jwVxWu#뷯[a]勿\^ni/>x~=}/>~$&vI##=uQ?sl.T^;i'U<'yHqV Zb4v9  )qeͣk=U d}3W8iSg<by?L8}DYY}T˧YnߨHA__/mRj#A3yvS&;곟V-I| -om_|u1hpQ3ymnN}n2nNn7ӻg@yKa.+Qu+wz{$eZMa –z*goʛ%oE7dN6 4Ѓ݇IktHw?G_{9"~WVP}mo_UCAߩȝt2i^~{bG.S0h\4b^'ڭN(HaO;G }v5d t6ÎX8ͻ\[AbD:^C)X ~⧻ {3(8a尫O/#2 ]3Ɖݰ45uiʷ;zΓA5y\zC_9rZˢu<Nk<%E֨'cs>>ynDY&gCSuaS),ۮ,TBSnQGk] [ +hr. 4Ok`<4܅'[='N5i^]m畈V-qOݚyL 0ˋ%RuvX%<%Ou9(>dZg.=ڮ~ZMkYRU1%JP`Ρ]lu@evV[Xi>L.Dkd:]8WYQ( 3FX-UX$ &M,>-ZA0b39Q'>FEۢ~W'PD|zȎwx1-(lԠ#PS2Z($sbDySgK߇ڏA[r<b6I2 YVmD E[mZA;<6h?Q%46般)z bLqq·e`WDZf' D\2H=Mf( }uV:Mq-!kxw>5^G]ҧ@K([hMYK:j7I԰%}~kRH$``V|7Fj=WFlw·ZK!x/.3j"^/` 0R_~ln[x&J7:0L9*9-0X xNi2T4qc8Cd$=s۱szDP-3cN3DMhO:Gf`E9$hRRcpAEc!'[*"eqXVK sT]UbB4RT+H,@$*U[ \ %z7w~tw'`M#)';q~r/&']X_(;;_Ngamo̒ tgʊRNѦRvr^&7I6ِ3W#l}Vp C8n/N1g(M-.+FeDHb"Q05"f*STrU=ctr7v˵Z-v5+>-9%pGJvֈIb돺>Pˍ3'NbK4$ "z~?ʪD VY7^Ƴ&gMϚ4 qxٗl sI-6ޱ6ƹћS𭲌lX+dr/OhR婃oGiۺx@etK4VK? КՌحѱCvqZ :%CـQ'a=%"Sk^wmH_1pL3X`f~JdY8Wd'M)& q:fzxۣ_JPo^n"A)ʘm(ӌ!]d=РʪϸB]W\M(m˼R^`t &M=ͨE (xڂjB,T{hσ5hdO=]);դRc"u+KDQ 'Y* 4B2Y});{7/R Vy)j?D7"K95hRBPAR,.K̲Rҟsi p70\phܓN@ ,y-’)i @격43MVRY^T0^˪pndZI[)!u5rM[`in:xD'7Fbs/$;qSe)#M3ND́Lg)dKig+3ޅm+͌]ϝP skGpWҫvE[ 7߻;=g,fC(F`%,|.%=-2[#'Ymĉ#UX.o3ECm fSϯsDs-77ў2y D?Wo$<2t4] VI71-^J:dW{8<*mIeO V&WsdT:ըvXoc1ޫkqcmmV+SQ.`տ\Mhwr[~*_R6~{I^s,-N@@G E!L3c<-+\a<5і1Goѵ0hr/){xj?R |&W| D񞢦A S 5OX Az A>wk 9-gkmĉ-3f iOPmDu0} E-u_*=k5PqK5 Dn!otgn0Bj_nҍi- |@s65O*ʄϴbq52**}k Xǟw!D+iX#<)řx OYl $eR'h O24&M^^r(8jRjlV o\x0*ձG/_yQk]Z&K\Gi1R%N|tH*:nYDڈY^R.㩴kbls|`Nϫd b hR^M0-URdϙE*AUj]2n2U'jn*VLhYjݘ#_֣0_V[n-RE*T y"3UEQӃJ#r)Ŷ=/^6\Syu!KG?C<ZKR@Z-4H€52;nlFX@J =ՍDxu7M:ȁ|wrWA*/YP0*yreD*3]^17)˕6ᵁ]:. Aj{=7 zR4 U^[+{-'@9h'㟡ȑKm[coZuy=cnݎ(IrQ{u{.dtԺH>3sZ|5YF̨ Nf‘ s+;C)kȺgrAWYgX}zD\pYN0G`ч(m2~8\ﰈ. 
>O~Tݔp˒R`|Q&*5)TFn\Y}ȕc[}(h>?dzKkE}kS5^JWE9qfHس]@-y?Lvc>J!N6 4eFgyhH]0iTX}s9v>~wF[scU1JW|zŕ 帯!twqL*?ֈuF˂j1qe8ny"x;EvH5—AgSHȒ%ʀ- 9ۓwyE4u usFeo͖β%;}vULtr,L..|7~6cC  N)s%ыu-Sr;7` 17UoClN)f-5ݠ_V~hC|U;t/c-Ws/Jw%Et8N'a&qrԉֹă/qk[{֐_g9_w=?(sZpfkh7?PEd "v(;k1ALPTՔeIVL%6wĪSd7^ZõsMaݜٱ}*y8_$S2a6̒,U<1U"䒉d7^<^4C \Iu ń@,K* Ew i{#ls!(!z"5tr4Tpi_ să1a}g 3#6 P}r-syKo|`9٭G]p2zɋA}>CzP"ճTI"3TI4h%>aT0v8 TdcɈs;Bp)&_Lt5F]\*V `?͛4ɊJN3A2M,BɞoMGnă*\RRv:ձKׂp-=WB?dϘ7ѓrEX8t4>A*(Xc)|CĄYi\{ V(-\gEb$B4dIdK[Lhi-'%E6ewr#GN$,"{R|~Z/a=o]*W$ aLϚT"{D"h[Yr82*E")S5JmL5}%׏kצl7f=gjC/ȉ$ۦLZ|#s۲Y5fAܘZ`[3 ӢGguw{9i҈?fs\ci}M%JJN¥"}Hs̕Vޓ6mdWXMZbhlk*qDLRU$%$@A+yw?>lJ9[c%Rr- [kJ# /Yd)`PvF4%XT9)B /'֘#xXOEakNHVkLØ|9ndq7ÑLD hi"9"= a,bvasi&P99F|%|V FL9piH̹5nt%J^·#,Rj(h}(r~8?E 8ܤ+D*,kQ\^ [I@dR2$'4aB)Ve[8P}[vaRY~VH 5sTr|AK)˄7I|5Yly2ؽ،\MLy! P1*"5Xᤘb: 89Lv^e`< gURj\:ii]犉[ڻJQ"JLsUB61zo"b)"KmLPsLbnH. ~)΀bUM&f8f䱩k+kDDQإt$m4Qy`$SPfk܊,$OPXLڄ<.&J4)>:>$IrgbR%X)SG%r6Q"ָ qgxE: <U+DjGj=> MXva}Lj,oyMS92<%d]aϣ=H`8Ϊ~=XzF~ܺ1֬͘p h΃X&8M>h+=S-N**^pXAhE§=c] Z02D|Te׊1x,Q2>(MkT%N ?Fq]ImMbEMT~ -`VƃDH$Ҕ 歍)Xc<b:i9p|5m3 (%5qS81Q)88ժ7g$HP* !:;J$ iF7GEB0Bocc1B:s-[Xd2Mx D*GXv4~h _ *uB~@=MUrwrd) ڄu :tu>%R0U~b8"ih/-Ś$‰5h.#Z_66pS^UԇS*p )L>ëyxBŮ>r᧺RY֊C$Cc6V<THBu:Fg%7Nt]f a6h*ό"Uqj8zWiFIYЖSDQv mNUfZFFhŸ&k#=.,L~Gˆ}.u&jMU+HaӯRTޅ9Z@~ ėR?+Fu*[ 1HXm@ t0f#B5fʌ0i[Y0FJa)IbguʂΓ'JV`~6@ R~1s*K9A?[cVA~FX:F@Z~IevE5+.<@ ,Χ~' 1#ͥzGiD@L\!Aޗ>i%㜲."QYltEG2ZNWd`W$`S v$yԃ@_88V%fU|I๵,Ym O.%遬r?5-]U^Zݚ.]E J#9G%]];3KÎ^`;v{(ziT?`o >Ba&)+;FvM4™̥<!, M&B boY-PK+/Qx.w2,['C2ER=R};WQ5:6o/^P)klX1FB-Kc2gv@D 2)-?P@"}Ћ<=lC/6H }.y乼%I(>|Uj "t2WE%=ZЊX #̷0C`S]Oፅe<ǥ +H˹HK@>֗3[!uX菭_Th轶ls7;SF_E=lƽu7olp=cLP$ai"X,Qw!bABJ$q:N4%.6Ds$\RPD?d~6ڞ+ftnԪP?1q7;ѐ\-&~N[ݤK8Fp'UN4N緟3hq?_ohorEOgVHD*8F667I8@w5  A,e ӗ*pHЉwf<(-cAP%8q- :uъ%TCJȬݫ?. N%3w3H186ğϕ7nϑn܃!=>|f~ ? ͈I$`aĊ!X[cc!"Vua`E,UdͰϑ|={[q(95 [BΊ8$vQ dddm_Cwy?>n~NPڛx1믛9| یgd[nuޙkS4}hoi暽λ·w5d5{9}=?{A/;_~kRg,W}sS(iړљ~k&f|-C WlU(se}۹<BsYypO٪Cu0og>ܤŖ+_Oꗌר߇I{sŴu]~lvlv̞;޶뙫}NHm7@,&7l:0{`  ~-nx Fƺ>mwu^tSglesW  ;=. gY;6}7wcSwwuut>~cNM ~ȿJ skg#<{K.ߟf죽?0B~ ~n8?6p1n*PzB5)c*~{6f?bg>7@).wtk/ Lg\g ΈLg55%lv^N kMNL3ݡq8uv+gςA.(>XdO,ۭS-w9h’?*fS6&nAHh2u]A fk|==w30q!fȢ:mv g·ֿ6V:돷?.@ª)Ɗ*K0 ܢr0=KQ%G# _aTȺaQe XT$ya^ $s eqb Qk8EC r.~p;'B_-V}G"=Cs3K,yt]*|N^CA' 6+M"ӧveUY17 K|%eՊXywwͷϿ_.j+k?|/o'Mԯb51lcGTV|JTz^Z- ʲɏ߆(]󜲵ߕ RRUR2WJJR5e Q] 7W7\CEvEU[T`'?zJŀ?t\t:D=\b7"G+лYq7]9c#GXNY ys,eL 0ǰFMӘL{/ *1[ޑAY_r)'ʪ` ںK/Oi_pqٗ^}{HE?_u(ײ#}@ :1oqCLg1. ) 񷔃PP֩lK2L: {3 ]9V}*ipdrЗUM$7!'L+.D Y?%d}ڼ^Nr-zA ^s3})wD޵yJz0e_E?qMFS kIӑw+~R |~M~j[{,F)<Hf%SD {v~+hlcG֖~ݵ,úF>kYݱȃw#XkrA{}fQqƬ^`ǬGaz=]d7?SٵW_z?D{h9f~͆ڴ3M1`UNYJES,&ht@*`q]o$duhNa{4i3:5wL!,މDQc,hMjbeO<}qI#N;sݛwz4 P9 \K@KAf]: &fE i T\T.Y*<(!)W-|'%{H7NJ%@(Pn%pD`'7AI+ÔZ!8>\5 E4F% dP$R( (9Pђq1 NQQIn$>21sFC\% N܎P]}+SZWGr]AuV.k ~>LU>FcRB$% A9mF)IŁh\~;;wz{]Bhly^:H bd΃/΅UWr"iQUX41lh7/9*M&ZJ9u 0B$oS=~?oZ+[1`ƔrID#FLYпKR,j_I`V(T\7+ RU_Z VTWk ~;tͦpщb.9+":u$XRH{$(2q6.k]tے#%?bzX"\yZ~qQd3?nW2\4!r(Tt1Y'M=EW"MmaVY-69 r XB[m\'ٮDt3n(8q- /}>% އڡ Kfuw 3AW2 >d52Y=eէ$0 k  p`|mv$3SkQHm}3:i;YbE%G"ocۼjU0"-T5{+`F/݉Ѹt!# =P3$*I ])P›CON;=3L]ܭ^;N/5=&#W^Ʒdwo>6h]V˾V1%*k˩K{vU+E[O].pyǻ6W ro۵ y`溂r\Ҁ"d>e􊠗1ۿ8d/W%tu&jGruhJqwaZE7RR\)=Q_fZj[E /kkrv]Rm%>R[ZulZRc݈> lwI_+>/ި/V=1ܚا{kVPmww! 
7~Gܛ]D=xisϚ Q ]QAmi7_-Jw &7v=t[{H3[ZNm#&VC91h]nm9ajJX =, J<\ -{VQD8 lZRh[eCMŽh1))WAv3*>HvK-e>@R},w-AܭaVD$ Z햪ISxTT_c=OmVh=E=p92f)N+_8΀ 9Z2zlN `s*Az5]//h>\ ;jv-)j9}bb׫Wc-ێr.jkvjsT' ̈0`pYBVf`BPR$C0dK!QdV( h]>@hku޴,nppQOX:w{9t9aJ)TrPQuƐO%KI#緘1ž(ȑJJHlYK;M],/h{%Y%xjQ4-񓿡+Nf'kew3U&Ar^U05蕒fINqEPX53lj~# bQwblɒѳ1-JFRkѤ$MɻRI $z)MOۄI&ƙMfǯ)PĎC7I0[9kz0ILbE <"Ƃ<{;*"U+Lu=8qOXFҏOerDMjBx ,'ֱXDKFZhr5@6jX>Å=aQ!=oZt'x7QzĖC$,(|H u[pQOXnph*Wm%xn[) &ҝL_9 zuhsBه@1Hآ(A;KZ :[&`g GzšZ%"џW9vS{l4<1DԪ 4--=QҞ*Gď)t -zxJaHH )`P%y0Va`a{3=zYH.=ҶK""JhmG}Y=pv_~nAo:֒,k}횶+xM8;۞gk[JΉѳw_t/򨶮mϴ]{^^2g6o/z?B[jG%=$6 v[lRX;`MBڤ?dBhj>n_xD-c UǬ@Ͳ*fv4V X{*Y,V54*шƹ%0Kl'myX0t>%yR"Q1ˑ¥ Rh$yR $ n[lC¥|i-mi[%hbN鷘TUHB[ &5.9 +GT[[@9IMn4ʶ.A7@K%0jmJjdՎm6;8$ =YjQ^JwC `J))mIsU7%v쑒,ײY3JAtM탌% .!J?(K6tz-T6lPkP؄N~ʱnGKl4=@K~l) j^ S4ƃ S$]oGW9`CC{m tLdy,DQ䐚g$G8əGWWUW F$PZe+{¡L=nuPzxGr.O C7g'.Wmz(-[LR'\;#'Z~AiIGt@ &`jm J,(~ucJQ I׍ 3my2=#3"t#GXiue彲ސlkdSIW*/~+YEzM$H˙SyR_f^joqyuGĽG~6uV/s50(Aq}>a~룡YvZ919&d ܸҴ- h'jԔ% ,0HJDVש K4"mT$2zSAXm@tm]swJ 7[ϒϻWH ߡ{aj>39LՊq͢eIICKD,1>Hz(\$ AM[\;J2-s#MEc={Wg?7B.P2М@b!iTk>W[=+R)ә β%10.\,?EsWNt0P]L]oӽ(cu 9 -Teb@_APƒNRa!bZj|u$YA7QC^g{`r.w7;"_S:7~A$-ʉvV08`ƾ}R]zbJukMUNdoDR {f)tx=:IdYFjSw|"¢*IޝÆDh ]T%ha'X.e@G:yƇWƺ"~R'ݜI\xmmu$Q2Y?L>T^/>RP,ޝn͟S,)ĴL~`%ΞcYswp2.n?\yWׯ%qL/yBru6pv៯Nf"ͮOS^go.Syኋ\oo0٭WK4jۃ7lhg?߾ye[AUAd*QU$7|IV2MoYkHAwW耺C㬟W x7aӅ;=]BLND0Ppy" PVhh'mpn{gH:j{ʼ(>iq4ߒ2?v5I},NCС/ZK A94I.Y9Y6TI(e%+EAvlА\IqL^_ לGaTWw+&\5R3WS᠋bz7h&Wp2tL I _d8vk&f،MXEט=CI3h֒MfzR]4,nOguX5rݦzJe\aWSU .Z`vt!џ*fڍҁ.SNqmE"nOF6iyXk b]"?"Qp=`Oddtz =š=/Tk I}D74FpG9<><ْ<9^;c R~MZo}e1P|Nf}RQgV)z/ҍNJԁ${ע7 8vq4r xDƵ'bVR&p~uiǢ$ʳ/ 㝙9e3sf麙}@\\H{x]r2%:R꣓}J Tl?j霈,AfyQ ޷^φdCya{5Ku5KY\-/ޯW<'Þ6mwVq9[ᒷ`qVAhxQ9 mJ˔0y\sj2/ .)T[#3A+WfQa!"FM:dTiLTJf[V)VU +T6f?Xgq:Ń_'jJKתw \AU qRƻG~oThMGӹԁ[F6=6LKKxk :#QPP D9VN)#fPp)72kB [V yV zX!\ԮvK)TXS1vzeT%J5 o{TFx'Z95fZif-Srm]vNX㡕z縋{6=.ser܁E1Uaw"A΅TeMXj~Xn4[tf9_RA4$%NSBշD9W"/vRk!1_wb\!~fɓD#,AhuR6`|oJGXe9WQroisΚ0ϳ_gg|p QfnkQ{"Y0{O.`#Ok !{0X9W [ȏ%+E4]`K!z5zp2Uz3pz1j NnǨop4n `*72l'ժ7TB!ae"!:3Z%]iT(ːrUV%,DK6Ơey&f^v<]]Fg`4)JRi[(j0T:JDH~YNDP?ǭ^Pr߼}5c\3e+WV9b5R+ƽg߿KzUU>#gqYAVٷ|̳>pdY/rIi//>R}_ׯhCVfaoo2N,"m0׃,T}\\ /c~_ޜ[~0ùj`\uc+Tb#qC6܇ֈK*<\x]>NUyUSeHNR0ncY~cN2wE홫{"(svE6!2QЏJ\m"&AmkjGu875N'Cfԯ~׬yv6g}$"4s<8ISbTI*#;|&veDvᵖkii;ڟWGޘ'K7(GW KƑo) @F-Y_=rs <ѭm\&]7]]L7{gNٔ);eSvnK1ht2Uv"UUYalQzmbetQ&Jzغw}yo5FhOt~õ)mkڜxƨ,Y9{mVZa5<㒔6ؗzDK Uvu4M9/9!jr(? ,VYcg'lKijk 978 Gs%pC$2d={5hV)r6{GoJyAW3tE%2v Xn]G?jQN u߯~n,ˎw|bBT_ZD!>Q2 gs*%͎L{.)d(k`Oy9KO^r[)a9isdf>y~Xc:tgZ{6_1@i& V#nҠJEJ4iK"3ٙٙp^ѳ0NtRA|tyg#56PB*tM禇ϧ/?2@z<~2PL<  V${$YMg һ34 O. !T9%7/~a%KϦ0 Xh` A Jg3)j"t,c&#m[DƱ##8N ! 
Blʦ/;g9K dzVNTǀ$1 T@ց0b<̓`s }iOWD9r]^)fg5ƪ/Fej.go&⤭EN?s A|' .pVYMB!-;SCsZzo&AW4R`nnLtwbv4{"yvhF72!x+{`1])¾v`8,*f\ф}`u|*Xf>(p?J}xKiݻYI]}{~PJ.olA>T-旯d<o+cSn|rrL5Jo"nqIl*>1roV]o@짧N=VGjE.=IH97c!H0 1DjP1!i{}C_%%8A+wz*cr8\0 vEvTmײieyaef.ˌ.nE{xf娘nop*w;7\&pj= <ң̭,Cd]tn.LER+Cř۞T^nzbP=+ڋ8 ` i;Ԑ^{w-m#R;2ʖ 1e )a%Aq4x*K[raCP'qZ" ^&a; ?]I'XYVp~ŖE=r,"8:IuKDe;8pF;DmCb +?C%Bl  @xK] H;f4(!M*SojHgה;Ge!y;u9 -nim!ɃsФn8;)Z pQKyBòyE]hTߜlZ潝&eSk(!mw+$Ň I+ ]1 I54:uNٖs=aQoWX* lXpuT1.LmGĕwj]Kޔq0dV~_jhsl+{X@f1a%O/iI`fgS&j9nf~C8ҹÄ 7n`' =W*yhՅJJI" Hٖ" F1 !4WĂ06?W||eفJ_Yv'_I0#!y7E븴)Kv{BQJ\ZPc4D&PJőV G @Ƙ*MBd:T8ޡ :R/Ɋ:< Ȭ{tdd(}g}ʋ`ٽʪ1 R9:'ZLZcNMe2i$ B(E1srܥ8XqA#Z1$ _KtfKEƷD.#8I%vP'C#Jm^Yl۵E8.+qV=4h$9Uq!+dyA6P/8NY?ҐztC?S̪m!dӒ;h!`o W$UJre/ N$MJ4Jm4˶osW'Q32;H\y6[Ƥ+wAR֔容bR@$Q)L>}L B"w:- c={^$,ܚZzJX $-`MMWy <`SD o6;T q&jzjN,i`c ``H8q$S4 jjt“$Ts>J9Q]Ԁv}?tiM݈vS8h(nAֲc;/BXNCS=ť,8Ev;ǡHC BШVQ߂}^S1~0-5e>k[6-y>\ q:ZTyKo]f!{Q ͳ~[s-!ZF\d tRH RCq".!%㪶Bzļ\h3a4/B@}GJb~:d>vF.W3#yg'&g3j'iF)j<@_fc)|8;Mt Qqj&,|؊KYh n2"BH@F9 ICTsLrBFǴp͖vKZ'ZֱHxP(c E )U\{4sJ&.wIY;c5 =UG$g>0:[$gA&=7w-@N'K 8 X3@oU V[g{xe7vO7ͩa[1bode,cs])Z ۥqzt6m҅Q8i5SQSvt ol[3vOy,+/#29L \1}$f?ӔJsl}|iOUt0A/^Q9~TtPbZV$eZ.N;@4H*9erY!>H^P U8`)BPX]sy`{z-eLTD[xK%A]0+9}e8sjO+C 6M'y[=ϖ U!{BpӁTp=#X2ReqeZ EO rwL  dP@4eRcAv 'e2/dJ Ax8s?5؉C?#Q(cwSI9cF (쁜䁞d@ϦS kpՑll: yܪ2Q8v$!# j^7Ƚ$Ȅ#ЂfD&K%ײ $@@*ϼ#!FRM;1CA{?K9smSpB =m#np@R =AGxbS}gn%q+()K O]Y&&$oԙ:O~2 l*{ey[,`9FTI➩0\@PkH`I! طϳkGrjȄl^'!o5:A9KmHZ"8͔C5" N(IhD ciadpI N"8!L9jza!z0d<&e_Rٔ^,Â5*3 5^# nHÜ6 :ϻ#RҴgs|.x~jm&qHhY`(uis §}"AٹC<%F%3vaDB3c ɡ}uu>imXR'NS~H`(|?~R]b2OgEtsqNzRQix1'mB'g/JUkk HB@B 4f4 !MRCB2 L4LŔEJ0ġbB%2 =W﫳WM24̶yUf&tunw3șy̓hfb9Fsha\zpjk~`FS}FlFf6~J._Iid9of^^IĊ<25|Z2,,1[>ߩK=(e.'t.7Nr0VoJ 8&\a9DJ1:D 2Y<'WD} !N ?QVūp8.R/-QN@9_do~^.GrLg>Ypvu||j}[o^~17߽ѝz?ӫ?I{5Η7__y/~{sիtë_L obùo/1o#5ov\үS"8*~3}JhϗUG,/=w.?^[]6ުŰK^}5bUGw^?cbPAu7Zl@-T`J}K}[E:v,o=|OHC!-^6H Ԗ>Fg>_n[~ 6N 2P/? &:Vx8Ys+𱾿\lf;Y):;/SUϵp 2+[|{nU8 CɾO"Z_5og{#wm]Ƣh=K|6y`Ib,pB7lKv43,SeER~#Cr9Ù9s90埯R~&]N~$t '|҉Z]}zrr'|Ѯb|zyVtJuo SLـ8tMvtt\?`W_~ ,dv~AQU~MI$7kjTY}Q\7/ucrj`|ՓTa*LGLR]{MGS+9h 6H3ɑ\"Pqy%{z 45KP?MIENΦTeS ͣ38Ѯ8?+ՙ4%+Ҿ8M=+~ce|3lP}@ e\ݼMZ߼zT1tJIn\q8_)C!J%ep~RznW iVKeń~W_ǘjfNc;S%T[xfl ( @pW3R&T9-u\ٜ{`XCAl}kM]եXbiT\^Vݹm,vQH5+gPH! ,H -H\ke}\kJ4ʉ^M>BǐqvG8J5 -xRɒYWJ +y(a8M^PaQ( oKy!BuMSp `$z Stp VKӓ酧.'(iC/@Vo]rr2 ^KBy).)(Fy*Lza<\ pޕJ]zORRLJ+<տk]K`sy T/0cիj`?+T3jV/>lԫ4ڐZEM *ٷs&h tI?Py*.4Pm T?ul5i45&VG7CrxuS-Oq”M>b\REtIMQ0-(ALj RYXLJ(d(,y-(r ]Ti`TXb} 4Ο{C RTB#jH$@PiNJ]H-*$I\ l]JYAU bT9TŠ*UѹQ .p]jBEQ9 $18"+7/+My+r,^)$DWHtŠ+zYd5bH30i Ȇ4K/*VR$%dHkڀr:!@=R}$K+¤; *UsxPTEBqc~=UW 3+GcxS0LiCX6eMUdX/M**Q"uK~MV{1{kmr4+qF${I iAx H1y 咳qBW]py 4{Ӷ/jiۗ/Տ={99Wjz9TaY! C%ޡ*L sjyGLtdGj&hĸP^\+I< I7""rcCFs*YQD1F*Epۗo\ *Y8ib~Z [Y"M6[g2R @D0 I  p/Aq4d*-$4)=AG=־`6 6aSˋ!6 8!7X@9Gc,"fB"a29F'd\ |J` (1{P]mUǙY*aj4於ou͔Q*Oi d5{`>3o>Fu!7x< cVuU)@z|W_.n @[1>x4SO/.LIK,с1@<F055?e9CY5q؞G>M3?YּlPI2A}$۳O*E^84jR,<MV?f3JluL!_&5n*J>{TVTRSH-Q+->>VyȞ5Эy<,}!@aޤZ}Th䈾+C~+VnfaXd*\I NfƠTn,"J?x[\z7I.Cw;}`w[cExhss.xswaZ(0ުc4"J/Qđ"nFֹ (ш~eۜ5FMQ|wL܏?XShoȄQ~se5x[4lv*Kz1LY'R`$|DhG' D_c+y<307qm2)! 
vY TͷM*١y1 N`7NY5r r&LM^v?5kk:~^`kS1.l҇XޝtL;¦֠3o{& 7ȺJoiDiگO]˖mWќj?/9,do޸}~Cw2䄩 oL@^+clZy@R}P!,geƲ'o's18?eL(g6qL!=.{!*٠7α̚w<|C{j&}6gߜij\ Ž1Y#wAחx·go$΀ϋW2?/{/G{hzrMAMoq({m솑fTkoLW/;=SnnNuޜ,:GSR[/y>$F"iT%#8iQp@kqm<}%o#Dr#0^;*!na@K.;6wXosxgɭsSgU]3 M|(':R4!ƩJ=ۀ{npz"[lŧX=PLT9+HTM)$q>('ݷ0v WoϫW3۫=MNl܃W(Uϓb i F%OHͻ uk.Cxe}!/D=3BpAhi9O3n/t v ;o ʣ>k8pVz%~WBl:w||ho-ړG41$"*' Jm`3HQ"hK =U{(z&m4ʲV1#x̪y q-xwV41+^t:, PXS_Ă9 { &fdJr@멝YjgO^F \PLn2O >pI`ث6 O j ԫd( K=D) ˊG2hW48{nϺwLouCopr;Ad}NYiC7BTxM66e  K+̷E6%ddF).^lrVߏt4ɦ&RSPl2^k|Mqaa nc4$0Aos ()ej`u4b)A0t܅;O8Ubc@M>l/oy5l=+6.@r0;ki3"T͸ii|ub3P5PsFsS Լ-AsTi8}3*wr=l{sMc>Yz;vKwhMxN^]5Y ;fXwJA55-w9W3烦YL˹6k5eM,*2t$ycEmiꭷ7g,j+#8GSL*J.Žjg{㳲 % [<\ٝ=?lLJ'p*Z4p8{w LHDžϛx[s<sc}i*6@zf^o_>/|JV]CDlչ+8B†n`P|&upkNVέFm F8ɥcT g(/a-R4фNkSn5z?/9r˃FfRH3>< Ip"HAxjLH9J8X;pA`iʣO™visjЀ)d?Bi&7<ðE^f+K3Ks4]3K*M 3,rR`>A ;(PY\f7NoREwmIrݛq{/EpK'/ ~HE._Кlp$jStS/S%antwWmm yJu%@ǫc0ACJ&)JB) Stqp ?XbC/ՒґV mou@` 0Ru""iAduzo=icp \| .5PRDM nIZYA BݞZ:"w~:qdUBU0~5%.BCjs;g *sx7orsbO gOo&i[w*!]4*ߞܼ3AJj-ΦwWRTN#cd 4\9" 㱒PCM>إktU+6˥F:g!ZzsCXyZvut))EK)*~޼HYJL}6˜d+Qs%1΅91NpN: + qЂ1)D6$I 3Ik# iKLo0f } TqzsЭz{ -a%[6?fC[]} j/퓛@-F8ëkJpF+Or3\.ěhYPR!)j m)@OpF'&/y/WaY04jUbЙ;c4I& CV/l M[J۫lW㬲*(LBf⑆zf p un/E 4#/(4#^4ƻ}O\{GNVS᥸{9UԀHJ(TؔU< N8Υ׽3}"W"")+ /r`T3A Ŋ~"5 ^3Ýp4o>M"ika@.W!R7 ?1d|:QVp=R9E)/n\tRPpF )VҊzBsA+ NzeRJ9\Ir=S HGPS׈aYQfnh6۬`="A &omH,*s-/icg/=4Y{d,a+}(S*LBBꐩ'3D@AQnZRibr2Q%1E^\C~p`:S  eDg-SmTHQ|%4&ch\j6JYۨymݵ C `ƠlPAҐ!M%Ȁdo&EIr:nf`mDf#)e]moH~ RtɅR\a^@S$ P!<)o1~%#q<IUU )#VтO1zUTZi/CG'*mI/Ę8c\1UY}lڐ#V@b(L0C^\*E6XdNX55w, {xn5iB,)rBR$R;IOwYZH,$CBXcO$}&$|В "R ?qb}@Z:zhU%' q#U<)9Q9mQDqT5-AG1񧊓vrN'MA0+10JtH#.p0f2rJ$C`UJr h."R3:J.~JÝiT@L_~@40(+f@#ۭuO:)٤&#o"4,׎o[k^#݉VO_:Bz2)QLH e`u8~PNEP&uCnkhDVImȚS1ݾ7t*Ma)NoSP\&6S@63ILFµ䦋NH:I?0g4Mg{i\SCPZtOU_Eevi[ -)K;£#]FVU@NCV'Ewp^^Lnۤ&%t66e 5S]YXA};&i;w؎ί \B);UUM-|I+r-BqPXX<:Yֱǔ*ǧqMKq;'TPYoqՈ3r2C n[Xc5\U~[,rNv1#IK;I_Osv9Lp^[~-]e̜O>,xObNo6 2AJћo7e#V0R`)ڪGAX$pssKNc'&a[z^v:γmN&1]HALJm|%zocث򶹘O+vty5X>N6v< ǽ97b~UճP=k#邟P}M| (o=d2wi%|4V-1]G.$76>*oʇەw7w,d(٫3q-*򮗜rSq_a}\,GA+\@}E?K!薌Mi!_K+r{ jG~?5;t-f ݮ~j#N`fg6dZǡx3!N7Ç/j*O$zp\!WCCxэ׵WD]>dW[H`2J=X(Ʋ_tl$dF9a\6j3s>.mp5 Mx~ل˨1zj48|Av0 uíZv#&HutZi{,]@έTc%T J^B5aOd܈0VC9I^ukr$> ׫5 r>Э}a5Q3ng`+9Cxw [aX??P9SI'NmaSfGnDT_o_PZY# vg9v6ەTko+{̨ 3zÇ|ny<$VOY~(p`/ a)9b䒄kg&qg@g8 ʗTUy;A'З f`(& MFE$!My"8b }B!yx3=nzݷR] n^ 4i~|c(Uk) _ä2=UL5ͷw˘POR[Uy"oU]䭪Vz 4K x‰IjQx),c&ńƔ`Dd:VwCeu*y? 
o~V "j.}'ݐx͖xkݮR׺]/+#G{YW5&c0IZ=:lqWʕ}t^Ow>%<~=*ݯ1|W}>{a摒[~9vN*"dJTmQ1wXb` hWm|z+IڿTr itv6r)Y+٭/mT=紳Jq2v% !_)mL5ڋ)A)s*峉$l܂T#"/VYϻ(or;/ns2j}$L]KWū{<]r}xy \a'94W%,8W0b-7iVCmcrZ]B M.Xe(bB #Bh$بd$KPB&Z'DO ` oAGmI_SsUQ9`*t|Nk{zю馾hV>GTկ7TQEFՄC[7V2J4SOHdY3Lq!9rP`XrSt~hqz腒2j%$7v()t!4br|>;F0*H0 6ȔI\ % @{#RxnFA<4(FBqc-ave@^G"'ry=կ&嵝XWbq?9,8{Q۱6_\ܸa?YČ7粐8y+SLaq9ww ڮʽ>cⳃ_gǜN8Cjl0fq2~O%#{)RL ˇqy1oC*,\J\2FGXCFte wPp>#*zj\\*|TDWN1D#޵Hn2 GcYe`_'8@5l'O Hvbˌi)3J8Wbu[u%0;[)INj !]fyIf]-Bgɾ]Q̄;mlonJ?|Z@|Kjx{>Ƴ/Jqz )E G䅥-ynEnMJOS*- g%4GjtQ eI](h^B.?@[oM\hɅtAZP@ QXC<sJcp-_y6)͍D#Q!2vk9)@ $8pI8'Z@(9R2wEYRidA [N$TUq]IBcU@]0uUDVtbY8ΆLVAsJR~BR*+& `5T;ʅ>A=Wוal{kڦG ||rJ`(d?#$I:'!z]-uoO˯5ޱxGjAlw]6w\ۉgVڼ4dVh4kЏfk _zlvnǶMF+q2%?0.yaJ L ~"#iҒ1$޾WCp4Q=v;H3-j-L xe>wf&q&l_cit8@xl2tJVy%ۍn ET[9ߦ3+٧#Ŕ\{ ,ҭ"&M>u%p lNZEA"J.$j bERl%LR*K P@y(î83΀wgGL*coCKEoj8Q)Rn5:WwJ˫Aפ;޵ ݷup E6ku_mB%]h;G/cZd4E"7^ՌJJOFܐǑ挖*Lв̐L9Kind)f=|jx5bb6&ު(bYRLF:1SMxB+P!#:.͹yVt a~40wͨM.q)6S yVj#{@d"6{GPndE.F.jh+\GWq.8+Mn(6$n$ QJ0!ic N7 #-zZM ]?&vVBNO6 >b;DSd6VQ xŴmA jXZ)촘yvS9gSw/'zFS wC aGh:e;HIZR8RB͙vL& 9K$,Dѵg"d"[ MCj\u(9"&!C!9Y^2$*,)ḻ}#8b=GpwC̉_X'i_{G?4+]2B)ӽ{xh/txoIA_G1zH(Q4 4JR Z8A/k:NOk)OxF#+7O iEե,NKjZ.ؙm &/yрFNhxh;e<5gQm,o883~grT/_ELB$DLBͤa־%WB3@if2-(l.4#L!'_(Y=))/OpoD@2u<9\LBX$LBXVPC 4,sFFQ2+R*@Scdqࡈԉ0u7^uDb.yB`*.Kv~TH`D%8iGmg$1TXs^ΑÝ.&pKw ];GJͽM9 DK1vzO?eҥ-EWvw>d]'+D5bTGlOJo6aM'(g8,4<+,ejC65-dA<kndɳ[AWTF>4tx?Z"Ol+~4pъ`4A!k;:x<Q?"lQ?C 3j hDG{3*c+P#dʓP8 ;pv$Iueܠ aXrsMi%qX0s栨-c-7e$N~6l'?*i=v39 >vZ[l= n۳pc,*.QRУZDh j$he 5OWhE]QFnFy) *Hu*Re9˦lYl bZxé abEᲚDFK}>x+2+=M!z;%sg*w6 T]}E B+U=Y^U?袿+Dg>S,~w;z؀Vh(1,ʘ,7הQĔnCkj }Ⱥo|.l[.ˆ^~T=6sb_8 |D`/Z>~6-fSag'P{9N6I"ppՆye (hXg@oj#xv9ݛl _]ݪ-yySrlkzW}y쯑w{I .~v50C̯? ,B+5 ېZ>;3wGQiBez-|׃1%9Vz0=7wf&EJ^~ڿ?TLOH:G*^4A{LsOsxͮ$=(&^yLRT1b1!<ɜB9 &݊ՊH:AaJNwT]nƅsH! k<s9"踥zL #@ ϕhJ)gq'xxwhO#ld3+uh('0] =?mџO5ե, RR+&ʼnyf5R L^tNT/A×#gV٬5u0!4):̱ |r?i+:eYG| yD"ЮwEhC$=<%@>6jc^\QF}xXȅ@sF[0fYjџ9 ݻ<&-7l#rzn4G{dQCc#:6=A":r)8>8^úGzLa:$Z#l h2#sE%sx!HF~"!%+-dPBB 5(&w 1d 8z0'@-5a"<`?Ps[!ZKjݭk`kdܟs\~5Sɛv; h~)U.04N1g$aī S"E$8ɀ ㌿gښ۸_K: jk;NR=O)Hd;m /D ‘eɜ!>эKB>'FbUiiiDafE/436I*%) à c(=hq"LXURp[a򪭫8FK]V+NM.0_off1yKoGKs\/S/?_pEs#. /L"x,ѯ;0wUo2dO ؖt0>< `])?Bwf:iq1;V%'q8*(Ws1>% Hi0cǹWokMƓ,^$(Fj0{93(6~r0̸e~aq?V_wĀIUC2ws斠͇ߠ3Gf:܎ڑmqaWnAiE*U i~Q'j6E:)*WEUUNڠmX4atZ؈Rp$HYXF'_,;Xz")o|WǓRض0 eVu˦6֕ X M[ӂ=^pKL22[vyZ k] Bp}AJqZ[<&5F38_Wֽw~ _M&Wq H;JSd >y3sģxD-O˗,r= /|@/mݗIO˨!"xK岘i3L"MhN3;,%&϶Lݒ¢_.0 Q! WMC-#$HDG )C(ݿC fAHl5\GFCATlq{DhuLD˞:^4IGLs6X }Lp&Qr8 F 7~l9ѤEN\g\r0! r%f4f64VX0q$))S)$˂|eQ'BˎAjlu$xP ]B<) b"t:ц挳F pv)J2q(aU'GSijJ!}[)ne! eԘqޒ: ޒ 3>4j*y# jQezr ~8{}J153p^.AEysg/RV3Ũ~~6*`PW꘱-Pc %W$z6=TtDKѵEp y bDpLN!k_9Vvg5~dhE^WE1A++ѸdhγQE\hPRi)QC=(a] TBpL(C}D5nk0lI\ԍ`L-e" 5 Otjޜ=q."jnk ^`e>XcvF!D4ymoAaY ^ޗ[Z|8u9n<})r/ko޶ϱBnt\& 0ZM 11S`Tf^v:x*531M3zz9ͭ';Ӷ㺗N@]p}r/}NF-ӆ\6N7f8Vfh67#BXbQL.!M/6ЂK=P:d`x祌'q/'*f)! &c)Qi5I0,ՇԴhQcf9aPvA\ m9S! tVqGr"dp\Dž$HцOVYVTH<&ORr(4]SH($YB,Dj xr1ŋ?P.QctUL.bQowk~w| Ju̟>ݣb^~EϫHFzy[Gh<}LctJI)%sydUd maUcsl_? 
w"JUSd@2Z/Qoa/gPIQ&2hl~9-qO/ wPHMiE"^<`q4;ytiRQа2Tސ[13sA˧TQ6 -s'1"Gi3"62raI)f$8#ܑLe.3#M VɘS IC{2w*A nsӵp{t؞M fwKS7ytO}zw֏_;xHgUPJ12E,KaPkb(8ր!3 0w6:NPǭH&R')wX9 I?n5Hd:KN8x蠾tTFn@x`qfg:ޙ~ƀIIǯ5J$Zi1-D?޹á##c;賢8%-ʐ8t0A?2=+ gޫ^n'x|yqai c:qdcIm1(spL Lm*p޼m]at4ҚSMe-ʯ$T9aثAk5Xkz0Cz2zPB'W)K2ye{a.4oUI hT\ps0H0qh0ҁF3{NfN9G/ma mR*HlaW?m )7U IӘDFS F-׋<c()<24; '.GP%{xJO5Ut8ɗ?а!SZ VrgFG 觹<{YEDž%rO9_wM:X'1ә@?˸)7>n+qSnqS%0es2%0BXIK48Fy!jA`2dBZϢ[f*Tc_vym4_g46AZ;֢L<$~Ύӌ^]b(EUMvA|(I\2jQpBŚI1߬|v_fU j%mlף-nC{0S4`L0Fq {b#b(ek%/wa]ؗ eL_KB}cfYQ{(;&FԪo2i:ac`A,"iA }pIZWqͨo$+XZ@/ߖ7ךgWo8*3^pcIS9r(.\%X).h+M:X[sXֿ#UEewj3IY4 pp׸ϳC!Ar*1N:lB$S g,/!Dk4m|: Qvr ?Ndq~ 7 4IbQ]iPn.ohOmͨJ0R^㧮0Î/f|z.F]K0ʰh>t r/Fuł/C"RuN֢ޓDiG4lMO<¦'aP;FN,|u'-ϸ Ch]CFK5i7#ܷR(8hyMQ8$V_$ cW$Z LNPJwﰬBQ4Hb)rZ:f 6G IT IQMPju2bHO6PE'IbM)( .I& i,,JNE 8e$E\TI'I&%>WR4ˍJ2͔B$A;QaKIV4Q Lr-`UNfP;nVR-sF9^ :XE&aڪ qdX0H`ft$r?K9(38ڡIx#JZlnU / Y#ŀuVHb0Jdb9OI$UQQ(IAX"V b0+ ec k[9(qg0}L`g s eNs&TcF0VOA);yyDz:sPUo+z-O1k{0OEP(f[D OEC30)烸ĉTbJK3> 'VFP7nEam;3F3Z /QZK)_%wh-hAC8DbDli-H \bT0_0i-V.h-)ϹUJVyw[kف*D*%UpA_Yn*soj*"; *N&obS_H )aO1med2`v(=VBLyc&ZvQ*[U.*!oX>_+P ]. q{4kt'3F['XA:'E(] :ʐkŅvl:Vצ5*Ka7'wcW|gA\fЂW9V3e79;ɸ: ORSZ[.(w3 hαo\MB[8*;'QfkmHKgb ;X18p j3(dNjSM2bgH4YR}UPh<żKgA}X}iYodoo'-9..4CYBi0[pftuf1gL|] e^yYy m{R+,RY S+|Atz@.w_Z@3H !GIϻ ?YiC>aygn(@dnnA?_ܺNChQV(Rxέ5b=s:5yH"O,wP}6jEz*5WБeWR{w@`!#pudɷkGK񥙚D co"}[95KIl}l2,:+Ɨru* ~޼{@7KZ}_;gۍIn7PQhyZsՖۡrwbvt7&~ ?Ѥ%/]Q|&$xv[u_ݟѕMAIà&v}Ɲ%VMKK[kB|?$^-VždezI+‚RJ9*Oỗ\YyYiF m+V~*ir EMB:3Vk= =t7S5EԆw|Em)Èg4}Eh~y#`92sڤoRJ09jU9RKC9bƤPƣ1$HeXxd/2랖;jl+C3v T ))oGH*A*% i+,CdA5?~6FFFE ib1QT9yx!!£QhX+.h5Me0>4U(6iK(fZgZ*$OaII:ЏZ{ G(I"d!.ՉyʮSD v 3 &{?ڸZoRJr8iwY#/;; ^ Cj} -į D7If,ʺxLxDN~ܺB9,sXXm#rHH~3B| !qO8Lf%`Ѳ!pLJKmv12G[;~3j[/GoGZV<|KpT&8GasxG;jHٹuW+zP;yJ`ΆBV arp6˪3eZ!!93NWY$4Z ϳ&;> )kٯ9"E^ =ْ4ᮩ)='!C7݅L Rvֿn5j&a0ڌPH] bs1_^ ~ J0\\;/#R`9dY2JMdh?λ`*9G!l{wV= 1/؜}@Z:TP1/aWUʐ4+i4JTmxݞe o[f\sL2 f1T($Mc v07( ~"aI[j<9@ o1_ fGH bnfcIU)Pֆ*V0uK&}XY=X!ho c:؊ 4j & ^G 7ךyM)o Cxs'r #E-[ Ӿrgf?Q~YsfJm׬vLb*BB 3ߎqsmlp9%T4,"p_] 1;-}8$"DW6k'#~./mO!]$*C׼J\|_FtZ1D Ki,o ,G6jԇw{5S((SP$RU` lcS j;(l6+j|弤>È887Lh:iy;XFʖP{zK:$ v2 rX]/BY2>9ȃ 0*XbU:Rl Z"AZT% ֡o+Ry]jB4Sg\OXRbɿQ/0=" +A(Xe Z x.x"fx76ֿ=Ɔk=0!涕>6J? r&{ N'3dC#X+H",9 &8Ij={O+R0d<@0>'i(RE3[_!5d82t9IGWޱ[3 oyvV l8'9lwU )|kxO:GB ,<%$Ӈ6?{nf mkFPK8%|IUЊEJMmмx$i `e{-N<}k3_)G| L*<xx gb.FL.ga4ϯoop`}/)h4:w]?N]?Gti<֠Aehii 괓NcXwS͔B(8ѿR{.qi6ZΝQQ]֝1`ŷUa6zX|~,>&wŠP99Z4ΐLy:^!Tn!3cnH& %zdwqZ"oy"B""6` Tp#Lat q.[u}Q'M;`,D겎IZ"4bPȈ`M$6~vL 飣Hk s{fAX&Z-:g'kۅDžەH}Ne`D(l$"R0xG Eb ZX0"2";X&}+THjg40 !F]GCƐ"(".qPkύ#}YWhD*L.ח&qQ@W`ϡ\|rk' m3]K(߆zcN_0 `G0jV"H$- O(%Èrixp]Nq*Qy.=t;9HRDJh ƈt.nXN1+ȉ B;I # C  8 %RzF" I Y9yT [XܒH Y ( H&e棥\q)E9Uq‹gn5uֶ3| g8@{2zHFvQ]a siAYHқK_/~RG?_Gf FLm?ïXz>)OಊzfW܊9y|t]f!Go.O+8A5\օE!Ca#WNfyJYF exH,ˆK'A\âpJݫ*ݧ ՘mitrxܚ>Nªj:^8sf* Ψc+&[`2/ 3 {{E6*IoTt[޼Q ر2?޸(YF*֓GϬm@pFXOB`v68[p֖qEd;(vJ q" s <ʔz'}T#2XlI Ft׎M1 ;#&NM|guj\m:ƼL@V`c´(`.6.""y2jcɁF-b0%2:0@H>` *K3+a1Ŵdvi~ W7\o g/,%~Adr<{^B;HkZDuN(g 6Yr' 9׬FEsBbB9p f+ɶڂ|SĦ'Cٻm%W~=ЄK?$} }[Iyڙ[{| pzfz4R7%l-p1%"uc0kMGج" %Q4ǭx3cÈ-K=?BZx: t$Eb3䳪 =aKDf*Ye<jL'1iNi2`1XdL6 YYC y! 
AE6}d:\TV*: y;  9|zU<*S\4aԮeCL/4"SgÕP3!h|*z [StJN)tR/aby)_Nh6^2\(IC.EF-2x Nd}Ի!o_H[{' *_`[Wk_WW'!V5qqˈ\{!nmg[R1/3 Rm*LQgZ)mmLMtD(j ˛, Qh<$YIAb4'RJ &5()8̟029:I$~4I˳{I>"28C%)$j<$)ǒLY a Sj<.cv׉;zG^FHߣg7|rL(M +<]Gosv=zv#&CEzHUcK`Wq Ųr¥DT `jĀ4ҡQPEbF-JDFn0t5s ѳfk4ޠAN2B{8c1۔IX(,U/+%؁()^٠\F$2|Ocv2d &%˳zo ] t|O+wzBMDolLC>&W |!hCNԆ9(dsRЌI]Np l'# Zݥ`IRpo'HznM*WGۆ{^+F4|!\(2!7cZiMIWȾ ;1aqs&-޺cX'u#huG ?U88|뎏3X#(F8G=>p>Z ߗ~=V|s8ߘ2TN%; #"~a!f E7mc=^pNh6 B"i!vl%:$Xg{]_gÊY'U-w"4gD / (ɯcAXL߱#DJSzm$IiӥeWp3Qo Y#bpd?)bg7brL]Ӑɱ%q L['^sou@ǔѳ>y5T H o'{9L:S~M֗ xB]Caˍ!Pb@HqmER)o$FͰmca82$ &Z!Y`%lO1~ L4V7K`\܌~<2σܐ)Gf(S;@1Yx>}V@9y5PJ 橌-GOp>]wPJLpF/y((2~I['S6~Q=Xjg7YNdnկo>,A=na/#D9tƾas$тOܔ`JZgAu..T9*#VKn|h0J0&Q=C?x {[ Xb4cV=נ2S`$8RY*J 7xam֠:UޢzGn@ I5 Mךs]/H|K"ǍC{Ǎi`W̯3c в+GVAhG1˅70ή?n͋"cX?ojx{a"g8&:@w̮VcKOǨFp`N45*1G`Qyʲqi4{ƛshaJN}Td7ǾioN3騢-μښ<#sy3jBP|jB0.tB-bQԖ*l,N45|q2PvT `{J^B0&(g-{aɊ?H= A"rZhT=邳sh=Qc Q^L@_i;rMG#9Or[ '>]8iMl'#ȖFx)>@` >#m澨w7weQP滗XFIi|9öz_I}EI[O5#SU[萎M )uRv7sІө$*M*xT&LPL\ _./usyw ss0yX s<0fAeQ J_28+{~Eb(OmtIϑUah ??NhwW²-˼;Ҳ.J'@-WQ%ew_̒Or%>ZjBOΘlRW3VsgTmЏ~zJ2~sIDNvÅJr[ug36}Ө[-UL W$X7VIrwKypB|9ցxN^._Fxz8aNVGpnzi9`dBGk _! >8'$*=NmpkP fe " +Д@yt/m¢$,ѦC3 $\T%;gcu(<[gssQ'z&;'Z_vyU͹+s98c&سg+r:}z bE7Ύ1ќCzF-` wWwuqoWaag,9gd1 r<xp+ s6N)JuDnYo[5q0+Ըu_zUI(s'n>A >@gn% okߋ&IHڤu8 4Eɬ D/R\SB#FZBFq8Bi6PSKF|j?R7/[,8]$,JtHb+{ hfk׵hLkO{T.9'~OpG/QɋŜӀ\Q9GDd{HG\ 7-/ZL5i0 =3Uu#|M7;}sAu+xS.)ՔfHMtcLbG~o>iWm<v) #^+ڸ7x̂o%r>"Yb^.eHFq#m\ʭv rzDjd\72@`Np~]i>Z.3'I=N)pJ:#PQR$ߖo%kv|x o?A4aDX) aiuPkJ F|ĒӅՑ t|YD"Ygm+<'PL`@1w&r*_Sp8p S+O})|T;i@,LSuuuUӲa<3} Up>  7\aIp XmInLcSh!Jj+}bբYL(ڻ 7M0>b 3Ͻ\AǃJ nʟP|;?l O0VGiQՒbr*_#u+5ϢHvMC,ufS"y$;[ՀA!Ո c4j3ŜJҐ7GSTCi9b$aۓN ΁o a)覙$?nF~W7f#+pIH ߶)}֫$țe\|/z"`;% tA*N¨M0:p :Sm0/qХx&26[|?(%e o)+Ca/JCJ2yr*Kx-AE4v(Gƕj.j:Ha >ŇvXX7񨬄l=|a_AxQ:ZqV׫(NZ+qjLV w 7^V96>&mIiIvα&Y=ɼK9mfம-HkV<8\3˽)o-la8}\]7cKbuΔ9iwa0XcOW5KP3}+9*9*.P3i[mrdluLzzfD94\ɬvδyJy1og <;7Vr{Ԙtc3:cZgA-pb׾P i^-aΩdG8ɤ~ ^(8VG67X@Ik,8|-dOh1=jZ '_[J$xS]4>aO{@Gnhaއ n8lpZ?&lv|M8(_,-0kuҰ`-A(mtJc/cʵ ›pޭxFSҼ4w)i^k(Ȑ׊U&zMotR14.)D5f귦& L0Syq)Zn yYB-s\L3ڞ1tޏM ]֮5h1y҉&ɊkT3!Zڑl9+sZmt .M?wߤEk?\g~<#} M(d İgpH ˍ%T *}))m7 ʱ&|j;SIp} y06Ɲ($( ,4PY P섲B+]ϼZFk +*uso` %VRdU/m{2lEe:h˾dTg6 Js=.c~ĉA9DjP A;Ǻ. 
w&gr(fN3bWtE!;4>u]eghZp5jMly[sIRbAcDjn.0rDp3++'kU>ў}Aj'C + &qY(BCzW/T]DEʭ&*Z2!z\k4RJ̊gVI'km?3:zIlyy`hMg1zTD(S^p˃Ėd1O!Owv*} ,Q;mg_7WbZھ9ӯLpG& %D^XI "RhmYA|@h;YEtR":Me%Ug?iuΨ:|,FLDӘ M#-Md7,xOQZ$xk`GLbR rT;Dwݬ[phyų9vT<mt]Vh|yᅡ>ڀ$6rTEvd"m7[>}{kY87pM>m!2!ҹ6/+3PDkSXOԞ~JTCZ,`j>!'kWy76UU&_ß Baooǣ1< S`8Sx]N08af`fss!C:?DSw QRs[ TЃ9|B5l":|?6~5l'[qsyX̎Dpn3RD mMz(8V)I Eo\HAwr%by'9P&N&]l`\W+~.23`?b735DO,lC *m8X`^ҫVBK15pt#L;^ZSKI(Q„5>'ؿA}0-A-WM;z<6Q!,`ixK<[^>8A|<>G8g!KfJ bx @ `i̱`$ cƙG`ɥc,}x|0ZxԋzqP/UU僒 1a%PiPp=2Ti.,1L!pvJeG÷O|YmQ!"{1 ܈F\)qU>>@~e z4tb/K8!d c$NeG÷v6dTպ/F XWͰ>u1 ܔq랅zV-ePBu]YD=V+8wLd ⚗jS!L&9a#TJ %e܇g+xӺV*_YJ8( ڡ2:X`ET'9—FaLS`[uXN1ʂtJC1"QڶrUe#7l,,bYĀgQ 8S ѲВҗiw-+?x׸?Mj88k"2Xp0&46.RaPcpbZE9 ?~[=}9&p~m6C4t z^J6p@ID}0|ټ?ݐB~K'ϻ F[G?]c" 坿8uAl m>W7_\oN?p}$pm řxv_0JNJ4I18.)|Dۻ T@ }ZzZy氮,,3am@>R"[=M0)"h ,;CBg /qi͠$r&M_ K$gB*pJmJB >ե/`Ü܀U`A+jnbyԢ %%+4e@ND>$Ը䰷`Uc@J KQpUCT/0D)JEr?_1H !!`?%+5J,< * bFx1OsӌVVi7t-bҵI"&]juR" O RKx+A J,+]D0lțҜj܎7X3/?Imr1Ψ:"^v3SAU شZKALr/([C`}^\rȹ5atB!`JQ|\3˫w8c  ֬ KJ D עPFT̐܁Jd04Xq`j]p TJ )}/W.[/'g6@.#9 Z3>]jbFƀ $013Kn1&1#ci5Sj`us~HM0KCd7} [≩)iwa0XcOW5KP3}+38 1ޘIVrᲝp@̣{̛^iwάs${\a6 eAKM=΂\{|z|k6 1o7s|Nk}; năQ4:kl8Z[Fakpp Gqf/_ak0B]KW"њbi9CrTbF7RڒA9XE4HLW (O &W(22IocTKZ`d?cH-/̡ ɆﺝɟoqSf4~[ 5h ʯ`ǻWT#ϛތ ~ 7 KxZhFѱR\ iL WR]*Ju=K&8+Kf-9b +aQ6 h4x 1G41C( Q;ƛ, `) W 3cAI<2X L'RuKc2dIEK Xf>T>nÊW#=h qR=ĝF^ۇYǍXglZnJh5?$5IOXw/:fg .LvouWF^&ajoI7FJ.g-p* X)[*ERBl }!Ut'4G}V4_y5Kl܅[P !Tp< 4'v߅݃];r+VNM𯘟7>IGR^#^*fTYlSU\e6Ť7bmЎwxWIbH@uR pR^fd&@$성:K0U2 ^JO[OL>MP]U g>ԘFe6M A/n VcԽ ]_b؉SƮb.BHJԜQ!ޗӰtݶf!Rȩhq!$2w%#Gz4'[xB/(fƴDo{+iD*!_0UO_ZcI2.P&۪?ʨVшbqbpd 3ƎNg!$П+~;n5[.1J#V Ƕ[Emf_kw͐KDjo!78;ޱ͚ك|=h2٠N1l,`Oj׻#[Y\FuPBBD!:."gLtkdOʈ(RNޓ3L3:^`@6r /˧r2 lur"%߱Y9?<ܽj!}bY^ZEXa#6^&;e"0bFDleD+V"Jr,}80OxgCQ1g3wIf<lP3b+I$MRw5Ϯ L[aFw2jm߽MO4.|ƾ0Ȅ]sDq$Up!U~>/F$TL8 Y p :AZm"}Y -)43J!J'q; ]TwN4ug ;pNw^=ӫZJ'3*txw IyZM͒,vuv\)գ0]\bIX_T`Gzi f0yUte%\Q4vpٕ[uJS}Л9-SM;Jq''Ig7y~}ux?x-Wur?lq%+cb:y<JHqEכ ST7tOK3T.}f=P0)E S'aZ#wb 2̢v.zgsڨ37\A8)r6cLEIm*q˕ȲC[ @ٯS +`-*alB%Zd9"yϡ;~{U@)0g4}I觴l [lO\v84E'}ȏ;f%V1d-ۦvƲ/-Bm7.u$拱*31z#ѴQ3wQƦb4(bJ.k"pyp⺵(apNJ6NK%Ӧ~+%Xfn'gj׉ zgU^ Xw]l_tj}< 赠{Guy={,Bca+. 
H2t*}= Չc!\z$3f)jg0ֿ;z'Re{Ws2|Zܜ Q֎=KVV-^P(TǥKF^(4C 1TyB=R0}IGQj QF/QN >N%ø̘7lL1&~m$yfTb'Kkix0SI.N;0h p[]Yk/`p` P5& "5XϝPE:Z4cPvȭW[V13ն +?` |\as;\_y_||7]%f1o~5|*!Pf1aVwo~lJL,<.!"VV";B!_D-xf'Ǒ;v˅w(zܡkTTDH^Txc ֜r8 Gkw| o>g7O5"~4N`<@ϑ[V F'gx9* -IEz!zV(,7n$nYC [ycWBl^o Y:1:b ]F^Ov4u"OZzd{JIM9KF] W9'3}y.%4c̆QuZ׋ևq՞s[Z{:;'g M6׿^yOO߹4\h8tEſg5H닇h:Si?/K?.1/x_Q;Vz~ g}٧@} j~~O43T}*h?`iC}jV w5kǃE"J}B?S_"߾}3x!^~.2(KG1{= f^b n?Fx{R_XP7[7 y^Ϧ"i]T׫Tgk-xx?-}J.>cF»П?{ܶ>d/YCUoeɛ:TX&W~=K"J\6@ϯ{z=ݣqﳟ?"cQgOQ6\ЮA`PQW#'tw W)(QE?iV}v.gGSIj|;[VM_Of5ӲzTge|g0Ί_Nгϲc_6ըPAv6ϻө̺߁^\y'QߜG1?z)?{7y 1x.E^pjv?yG[d_\jpVSɅ}3^$kshœ?:?* Ĝ>,3Z8o={{Kz;~rY՛>퓳VN|}Sӻ0@ W<#~D__>d]./oz)GlkqLThZ³^aU]`vW0;l&o|ɀŒuQ.p?nz|q?OeWXO* 6gHeV8gF$R\X98!Z씺YNAmutxYu>~CR%k<K2HwXіD չhk2|cE5Voُ^Hl ߊP nIۂ[*NUa>~ MF~+ (i{aᏺ%;[3.hC飃z;$'TmZwΟ y?^oo߽c3v?tOΫ+eSuqi?0]d!cHӛ?3Jg\]W(Ma1J-O.&V)^¥+qM\*Z1?=bĴ?co8?'s6OV1/&W+ozfofh_݈aɛж{.Rӫ*Ȃ`Ӈ :(c237# 6s RZ袕=ZF%ثi/?=Cb(1hM??L (S$pltJ/ e.4F9Afz; *u mt-9%mmj{yv'9ǝkoclF'6+,鹻#ܻyY` ~#a]s RR92+?F/aRDa`4(52檕; pN nl),`l4$hH]\Ujm\_kBr-k>Cډ@jqAmuCzs=IBo<0ԅͮJp.A %]j%pM)1hv!i1 =Hœ7$%U0Z:BlHA1( \fW2>|E>JaUZ(R#nT(C09 " X#Sm Mak rZ4xݥgs4?z`:Ao|s.e _jS3!1N=7;+@Hipݽ~,c7SITITITIQ|ƫh) ^+N[)†I$ԕSr@7Ճ!yu#ZIJq528ےAfhzu*paϯg~iF/ NW{|] 4{{8);(ywhe'EM@nƘ0Nv(mVpX0"Û冊sUqrمkd0)muV.E,]]$`*U7c %6H"m鍊H=yUHIy#]5G׿*O'L;,\Ћ|W%Yp,9XKK0za$%ƙڱ) 2pn!U LH׃(1g8l#z%ݪOw>=-ώ3oI5,k,FmyFrY%0b$ca$bX=mPOc-wuX0 +jpY'kD4Q$PԌ nF4۔hϭH" D|x[Wc ur44<@O|RCgF dO!}@Z6F/'G|W qD Uk]j6\M7)@j~4ngg5$?DGXmH&ob5eJq+ 7!x%\<ko 'IU0x =KBI*,X@MS s#+&|oЍet!?dENÌyB'LcQG0nR)R6B(Xs/:+4E$Zx95xsS3%Map(%)(P捤dJPYNɨ=%,c4#JbT(^GrJ%$C.9 h4Dj*JOy$jH| Vxn\:o[~\QX5k]!vy*Wz :cx[[v[ړ~S[|hSyD :ni%`>|onь ^߀?|mΝB[ˁ-wOӗlɓN~W=7FEbmu{*9:Xjp{gf2b9 n<8]׷2pY6{LzPr4خ0/ݿ\a,bǾwki[<{cTrj;ߧ3wQtōyXW3zu?s*&ڭ;cטdh0܀#z* > 󵉂6 NNDuⴐfrg*8_6^ljHu륋Ό]q&?,xvƶj Ng2Mc# w܃|sS$Ia$a1XKjFBmFpg/^(H\pɰv')_8vxSD0T_£xfzMeĕb8=OӢЩ߼;Qއ~8QZhnm͂5ٸj@u^! tbB{#T*<2c dx73Fq<3dA8ޘMƟmZ1 Zgd֢r&ma>x=O7m""Z Ԡ*nw!H&~E4g5`xRv]KIsr[Q/duGkɳIM6z@]VfB&۱H9NaKAn=` /8{ 5ɁU{L>~yK"KUe. [cHok<Ə1@1X|?cd,k a;`W--ޖz6f7YlmC=n=NR~|vd4] GdO;yen._CX".㐠SAwp3wF)a h_Q%|, ޵#"_8i O=`fi`eJ݃[lΣsC-UX$_u e-莵p=+ހ_v ͩ!y9 nﮣ˞^؅aQ8VPs F싚 JNKQs ʵ% G:$իmŌBeGMfok>gcoku&vܦPDsqrO Ce!)!z%gpYiɹHgk :N AԺwLBS@vF8U=pe{.YДpF(\20befג$4"́'ʄ͠eԠmLA"\pF4 .YoAh)1 DQ㜀Дz 6!'Ubt@^,]j"әDkk"(ƻX皈 J~QA;@PMr%WƦYe04šaYpC­\ގRЩpĒaOHkidiNcH۳ތơu+A ^vK;5kFL_⃊JW'Dh(`oh`YS“ pOHM2W]MoFL_I,(;7kgg,u]DJ'Q !\]ZHgJNy Q#,} ^ vb}^<م榫'l&7x@'t1|B+a]=yǍ/U(Ї,rdGNvvqv VJIZշ`\f1[-.IRPR%q~gnBFbj&@Cu\l_hA3\#8f\Nw;HS*aL ̈ACݎګWb[_}nR+t{"'@@x‚2)+D6Ad,YMdiV4im-~:eqzr3*= ߵu6Y}[:B#Օ+|uBȉBrFr|8!_j <&C4g3|29[Ae# MX\c㾍Ԫ ~~! (A֤s66;_kV_Yfklx: *7P0{;Fu֓U*&KT~Q6Nr.}Ǔ\fZ\3bOar鉧Қs1:Dfa~>֞ޥE_;֓2\!^KOAIceV\i+Cy[OOyweʻ;<ƉVx n* CdGTĽ{>孻Wyufp٩`E Ľ?6#>[?obB1ɧkh~[lBe6ڜat?y <޽yNގۧۧWM ;WRIR+^$W )D*sc IL%KӨxr.cE_9~?'suqk96>_&g{)z3XGk8r Itbo+c#مL<17 gDקcvt/4̞zXMRK b# *煩*Z`.CLda7>.8nVV *+m̼anF[*X(J@Π 4=#6hC>P|bJcr:-v7 ke/@Z$x94ӑ{t/`B@e轷MR:2ػ$áZdg=)WO :7.aKJh%$֌xp%ļ2\Nȳ-O$9*+u 622_~nӇEVgZ q9[P0B\ȩ+"SNĬu&6D,tn>PI0P<U!45uۼ9U`;_%!gk!fTZi*֒G([bדQ3׹9uuF]I 8a6ͥToxjDY"u? 
i?<}ryq)}s&?<WE?(bj%z4~{?n ]7:cxu0pI=Djb B!LT ©?2 DgX%)xHk3/+G$I,.@hA[ZXt-:kP :jUE0f**e 0.2zGtiH]>3*j`)㢕 V:iph/9!îF(͐u:/4F٠keM!}l0 Nyl-Aka-y}P_)\KCR9 -%@a:дQ%Y$}ɉ(i[GPpSPӆywKq6"'•@@^EW(X^)֠~0TBR3}s`pC76\:+{t1cISmi9ӲL˲2-A;rAM(>H0VvDꎮ,@q駼S^UåYs1:nQ 7p7) |xԳvj#mMUSy֠TfH03w%H>7ed kYR3G#LBct@Y3\ |1>st~:ߴ~x|D,{צsBْ)\J%vS5xS}d_&n5vdԵ>]Іյ{ |F8:s:`6]vU4<eRwA7Pckݛct處ztwJPRw5))QlR9W/|㓞-`-ք>?˺9}pgڈlҩ^"?톶ڧl.K);3Xh~ *mćx>FpupIxEp*T6PIՖjFVj]vPWro n5-N˷4W$f:X4Np8OwGpgR8hӴzW¬"gc g$EJjQ:_y*C) zl^xXNkS]%H *ŭI੫mlM%oߨ ~|]xp_4v݈߭vD\Mo(d|X4JqE4;t\-ة-ڛM%gPuX׸Ds"^}Q}˞5ٻ"lPCTvKnTcH{D[V],௅ S & _Yw!U=w/j!sJDl×3U^-9! m9*Wb_N> aQw_φ:{//>({Tɶ%%9ȗ9|Mt+x$wnf,/-h΄'r/ͮ8H"ŔK6 E*aYn^Ây _5l0 ͍ܠb[8iM̳Cp>AWNmֽ'"L 4"x ~koxGG7 `#-j#$_=Ο\|F$ݰ{Ⱦ=PREnPcy!TEyS<>$vd4Slvc4&4Ty!>ׇP_{=O8J ;echb{5eD8ʥ1mF}b& ܟf_߯Gi|׵mzӬH$yK6GӛZ1oZ7~ kͩ:"ˉ<0ݗY49N^ Z6ԵE5ᖙ6[N]&-v];d0}lM" 6l5)% LEb0$^=Aza5FJ318o/Zt̵{עsizBB] Z0R>`/f2đ(ꂕJ6*W.)'1X#c)maшPvȵ][!'Nݒ7ab٠s$^Za^hgT@EbFHpVA'.B/xv,Pdf !9A60\i(Q.T;"GGt> p"IBQTAcd98骺?NTR =MM׏y!cLd#iL)uWg7mtPL{ eh:X{cwA~Ң'rvg _`_N>&mVti{1 .9e%HN|+$NՍN7qrbPq-ėkO2wcFwwS@"@5 u,dKEYB?3T血SէէV2rqmMAԑ(.Ui0򪬡JIDL7j94%mmL{g=R3P~DpWE Jʁ GI-m)lǮ뮠ݺ3F5!jo?r5*7A][sF+,lda*=]q-3H^xRűє{b*25{Įf3szO"Xo==K"`Huƈ6Jn*y V*p|%E2ޒvQE;$WWQ.T2"sW!i6,) K:E &%My*mo!:Z)'Ī1EbwB|gQHy"i%8mpc `V|.UVqR¾U."gCUKzkBw=4m(#nZuסARW26rdӘ%2Z.%\Ru}BG0'yg- &Ț+u]: t;)!)V~R>gu5R^kThQʋ趪vPDc[+EBR-#wvGNeEbDxdVzf!!_B1)C4JurNᲇRt7Ynq$`r(T"7``r& %~BsXz$2 3穂ɞj&gM>)Z.q;$q:\n( uXNULBmOXc3dpwoY8QAsg3Z?};nH"Έu9n}2T|mnIoN9yeR۪t+J17:; 70k B5&+R e0+rҴtȈN6a.ꉌ׃5!hV; $:_hhS2_e:ȵюfiyaf{FInE'H:;+7pl t6y{ P( ,Y 8OoUU":@kaEa&XySD)a; 9N8#kȁJO7?qL|z|yFB;;6xpwOq Zޯ=L/?Ngu+Q J4%Hqh O`yu .,F?}K 8D<}|x_`Z?T;%N?G# K6'JK1)I{SD T~W3?707F_ar9xT,/x9G(D)jDM)ch 0x0Krņ'JE>440Vg$ S%F s9*&Ϻ!yQ f܇m$/y^iud8feD6)Dl&}6˨U)}rf>>^.(n@]epT:9qxo\Ѿ,ֈJa܈0XR۝p5:!- 7i: Vy K}E&cR=;/3xk/`gs.9Wm.=N|c,p/z n)b>,:2=+le~>|[+{g0f?I߫w /Hp ܕ ,MA$c[PONSe\fYo>L&0ȮgV}"%l\̌4+ٛݻ݁1XZhpHKdZQ6[]{Mg0$^=Lү  _ALr8䂹riiHA_.˰XVYh o v(' z3 D w,y0=d`a}?M7ĭBR!F$K4eY2[͛@r!IUA2SplA>ݲјj ӕ;[;od1~op!ݪ@P5%< Bp&M"}JEp**7V&"m\klV<" xHkVw1=h.\"rB_5~6|&~tق V]͛_GMi82->@Ͻm209\Y<-9M^zIeX%Ar}B^FZf:t/%!"`"Q 7/ϷLrq-5/[Ҥy%p)[=sc%e9W}m_yvp|£^. ռ\~_3xxBJ0ù{ڸm<: `aML*1jyd-{8F!-V-Ω7d a<ƶQN|H:'3_\ Y= G\5*sᄩpp\XhjSTpDup.I*7](۫oĞhR:@ NL8O?GdO5GM dnPXAl]z.ŏqdw=+FD+\J$M3 P|:Ow@-DSn~P qdئ`Z!H&%F 2S1~2R0 Z2)֊?_ć0[ FK>zlR`1N\ +NnR_/_< }!G?CaʵKS.r7'rjo LKb̠2+sLw$oQDN=[` Ȏ Vӊ Fݖz` zpmȅ>AN¥`u[b Wܺ\;*i',NamUBj#80Nm.1=,Թcȸ Lg\,Hjv(NH`\fLc+Z͈h7zZ\YC8U`gF[ŰK: ^#ܕSTTJؚ %0xXJ!`"GGK K`8rtJ92ʗHWbr+~P #RB8my?$BJ-$R 0"Z)9,C@*N)VZD0]4Ո|{yԊ ΓZScS\IQƍL2E8 *RmZGH+3B/5Ctoآ:0}<&kZ>\MLe5^F>b9S$%dž~U~.~Q[oFm05f9-գ߻m[+zw*s%KT,FOT g*WyCʘ҈4> \5AU dT})튑-XB[ nuW>D+)-ނpl>S%r,"ƘNv"H#[)]&]w_^\}gZ|JKZn8UZVlylm4~=h\#{"R4X^s.x;hPױEL B~n{+PZwVpKi-]N'F~ K u<= X tn^̕n:d r~[Pw b.*ԚxS;bVwN,%ՔY^ qu[ZHE5y~yt;zASo}~YO?d:[i^h>|⧇ξX0z/.ـYI*F{~5cMpqS~O<1޵6n$EV~U? ̇Cfvq=l|g$ VSLILDy fWh[G1T_T1*IW#-UB(HVqXTPtl1+=NT~N%`QjUK20jDZ|byi~dtN&DegW>~:Z?OY<9"g\,rr{n@*(Ih]$$2 j/Bm%Q;L;Hy>П'b׺FT;A̳a9k~7}Ϋ{K1HFK5$KLa"͵52ku]u%/4 ԘR(T|nY|&kZs7lM(ykv'] gD yFi#Q޷tukYaeMWs9.|V5춭jf]AD!< ƭ43/kk79;x ~qOw/8U,o%-nvg٤n@Oj ׏x52uh'YʍÍb ~Džr{hƼ#3((6Ul"KEp&,}͑FWLnnC@gCrv~gGp "A݁9p:(_80#_n!pJ؜lZ~|p.-{&A~ُ Ī6i |4 zW/5OMd$J%%A3>MZ$0PJ$Yf;c3ڠS~*YP`&j4$\OkpF¬8T(C/ AGݫvPz<\]|%`;c.9|"NLr@9.* 1GÏ3QHaohEu'5VgP6? 4Il(JEW~׏RB6)9̵q}xd zđlQPzC-2ň3 &(B[Ij:%zscfrc.i躭/ZAκԻi\1V\&,U&d2it=<0-πڐ,:# a9j*-CO=$E2-\BR(^oslPM5! 
2FǙd0 I!>P0D-TɸUī\cb0 ݶ.dsOCMs^mHT E)=3a#8IY0ϬU,EG J łuapT [2nfuvik ;hs\Vnя%fQ#57b>KQU[*tR(Ԕk.h:xCjJgL2Q @i%U 'aM OOׇM^_=s5IJ/)m8B`; N856) #j8S""M>dkACDEEQ?tsC0 MNrƜG䴵S꤈S鐫I^,rtI2"y(4~D0 gB8iʤAj$HDH%7E+G6> 6镥z8(#NA&*H$wyRb 9FQXsI,DA7Dra2H`ZSt1B"VvzTM NJEszfS*)ե#!4U+=jY2S(.MF*NsU&0ύhL6FζKg̹[Q29 B62 P`RU 2jQ7ERj;^:[=ԇko Z=[vxQjS>:3k]:^|[NħU?DT8F}2!8XF59$84_w ;svLS?;t9Dԇf痟eW6S=^81 P8n`#~ET1s[m0Ӳ~tѲ2IʺN PCнxF]ھ?vPn~m2Tiv<ѐNODאv7'|p˞q\5Y>5|kH;ˊq9{j Fnw&31=$ֈBg/QMNK^>̧X371 7_5>'njQcsqW#k1fE@m;)v-7sm]\׶x};)XfJDI} ${a\|xp-KZWدR0n! A.=8*h}ܠkAƃ.Wd4G 8F)nj->?8˞n|='Dy(u fZ&&喝ga#/`yX{Pp4a\_a !=P;ˆa111.)S< ƨ!V\^1$mI<20 shK+I &'RZP uRz7>CCuoՎ>'0TYpڒ bڒu+)Kɘ;Z6*#O,%yeRbu2q>EǽG*rA'K DFeqC>NnPO1+w&ΊA%GP(p+'5pQJ-eQ JQP-6*e?(,9%y+lJKl'=}!2@sC )[C~:gM(b?;'O5C=Uwd)NYy7H`#9@GRl5`34߮(X4s-:Hpāfյ 7Hgj_diT_&MPnC鮓: |Ӈ3։ xɞwn~}Q@pɌ~[v_/x+ZCZoJA(}zEKmpTWOMR7vWGbUL:hodd|{9̨F  0ÔQz~ඍ)J Yn~UFDžyݔ}zrx ]'Ъ2D? $|rgOe`KT X)sɺXFDz#ԘS@e4B{6eH7O/?7Ϣi}=FJ-4b@}b0F//J{#׋G 3'!5B|Wqo#j毗qG9^=TGXvPMnx>vZ˱ex5 3/NB<*"6蕾Z('O># ZĤ' bwgn(/c?_/'W[)5)@mKwj9Z!e/o._${o1|[+],LCd7fq}]gp],g0}ﮯܻeШɻ7^6|x9eAA^8=̂O8IBImJT~f(QF$DyEExH t$4GKq( UCՌ%k{4dqbӆr(72:%ʥ&5͇< >Eʥ)I\PF!L2t1TT2 c "GC@`nqÍx~r׋۞cb3WX Ud?L_g_ zw˫4n\fo'BsP$M c8}=ߟ%1taHfZn] {=Ŝo0}y`[GMo݆_wY_ R-QgOerdXP܋A27ޢZ Lf&U;[ngKΜ5x>zō^ Hfc*s,WXSŠ |UVir}I!(T=)9M`y-8)/Rq$g\mbY=DB4^E?IZ'kÅ 3@g+ri_ŧxtZ\k} A}Y8m%!{BQtӖmgj@^EoͷjK ĺ+HZqdVDZ^jԎ1iU2VF_TH$1R? Lz:6 i4_nOxfH. Z9Ud.#Jh&gfj{Ƒ_1K|8${͇!RTYfGvl)Qvŧ*Ls֙k]sTF"Vy.-(`”k­8$drDK(zP͗s  ^cϝ;0r~osɰVk]hŸ6k9[[p6(8Rn|2GBTvBr(槿b@eבV%!4gI]h4ڎ\WW 5`) A%%5(Qצ֥2Ek?C*F&x7C,ƴ(J\JlޏY{[93Pw},|@|;w}.\gsp]yE[~M_j?~\zZ?Hc)Ѕyukgpw6FuFZ/"\6}J±9ݥkZvrYדqM)m{7&Lbc:cn#F-Nanx0۪RP$HA܀8oᅦS;1i;m:iPaϮښUOB[}2{ON)b*8B%Eխn0>e :'%+sbAN.Q$1mjQsQՂ\TGPem SH럮4 n|ļx'5vo>)?fQ:8(bZDV^OUXx}Ik]gcP-A˩|{S26 ݛ+=9yHsqfO Douows9q)T,ArƑr")Lj3WRI诬`#TDE_BIJPk(4+Y2vWl{G=|3U6 \aR3"TBkkeV#] 4F[X xJWoSn,PTZܻTIA\r9%3%b,p0 ,C|n 7;TrŇ/Y/-5Jh ÖQK 0HΓ]Ӳe3V !8uzϋ JW"uJUsPj8@j`U֢ `4  o${y>Uf ߻OHW ] ˰)I}A] rDkYKV)0.Ҝ`TWh]h~淘Ԣ|};{$gpSj;ƴٳ򙣛eŹ6ͭؾU;>nZf_X,<һs1ǗŦ6inE>Q{X)yG$G=ͫr%HnF6MP{ت|3G~1iNƙ)"3a6A>]9 uO%\Tο|AaL|I%k<"䋻4"zx!_cC|k"(Ob7,H[7$}n5 F1 g`= 4mv96{k`}fШe4UW*RUCʦh׍GV!0>;-6S*ztn0 9=pl =1eH2&'Y9ᐢ AZD28y^$N4 hFd!;E>~6ёNӺGG2t?cGԉM0>6='G$PFL!]j>C/N!*V&%²?Z~!%IcPd$cWZ|YqU ?=[IH&n&0({$/pq2}rˆ;O)مelKu,없pSrPш1W!3o0gʹ2O?:[g2#ᓍ]zehc!#!&8ZZ(eq*bҴF,7XY\#I=٧ɤX:٪4>{#'L}U%xݏ9 x&0"u&3 ZEU5׳>ղ׍|~WKWȐ}xyo3Ʊ+!)eYVNWЕ,ͮ~8dX ZƙD4ץSN\eR)@_;NTVø~r"%NaRke"ƱFOw2(+܃+@[%9@`OqK }^]~uF`4?zK[;P2S1b&t7s1oAe1% +[RTaYm).񤗘wzI,y>[1#ǡdFҨC=kzbu<+Q 7Z =<ÑӖҴ,j<h ~6h~?SApzF,\F)9ʫIfpql=9 a`g">)ˆ4gDpP}?hA fEDtD&9g/^hS`4+Wz^Lҙ9 $_WfRc[&<{nfEفdW FD*]0Ѿ񁩄X8!F irC'oAQmwxHG tNI\ lUF-qY-R$`qIKJpI BF8lĚ>3!U&Ej̐ (w4p9=| S-gSR2F&:6Y2v}yu].h.h];EvV/;mzwg}0Ϸ?Mĩ|fܟuU.}qgyRXJ't_^G.'xsk.+B{"RZRծuh,9>9u2KtS;~@c!oDlJo[ލ29 A餎ƻ)p-yޭ y&dSV-!Z#a'֊ +㶕pİU5)Jj^R%Jp-fHGԆ$H-p;ԖF$pY51HItRK(t;;|)5 Zb\RhNlK?7nI6%wm&^1D B1ZEEN p}D,䍛hMQ|nSnĘNmۈYөλ%O4ֻa!oD;mLn|Xt3e_bQWvӖL*CĖEPuw)*q w@孰5 JTC7?mWE̢ pӔub"DysuC{dRPT7z`8Q2CUmDUB?i\Gϩ~?(gT-_KŝS\<JLe)a|$1'gsHe^[p쫽;C@7.!:|^6{zyUjO*Ğ&{Li2M{Com%*ZV[[30Q+0^؀&Rӊ ;L2G ֫Oi>fX#|Ldم/|gU6v#ւ,a29>_ReCZ\yf-nFGbnHh 6ZT@+/h o_P" ` !$udT"gs&c^hZRӫ9ܮ3ٌ*E - k=^:1BpTAm`0E2}qKa,<4Sx)a(V^&+s<\WBZum4v1v=ĺ撊Z SN96n$5ԵY,-vʔ`Rbxa`/LT( 2ܦ: ܝhMwUs(%L^jb;Έ;V1RY·ۀ^->@cix1֚Hi2Vq;┢IĊO5:]hB7^:UKS0Tp j&@`4G}zL4 k_j8՘iV}63JhϼgUYL%<=}A-b! 
hiuj]HG>Z(Шg۶|D% 6[ A9{9/h=Iܰސ^.1^|r"U9Hoc̐K򛯨1ʅ,W^=jQ=w.Ư(aYVnlZSҬӧ9oZ0zmK0LU BWodz2^c3*929%#ܲ:\pC5qgU+m+4 B3wmW]ߖ(Q~l^usޭS.C9Q/?7_&`humxhPlnfL- :&fb]- #9g@`o} @D]dxF PQ*؝j0!޴06$ &؎rTKO**e:rovťFM{bӀt6=5 #O߯Cu!fo K:;.Wk.Ǻ8; _l0f@G8+Tә[tM*vY(lF'y,> eVdMZ+Ln ¸ VP)!{xvJT5z4 JKB u/4{ g3.RZYmxÆRZ-&O%-PJA~!(W"ޤ|F ~-c[;@T]'JT]'iJJb` xx t8VAǀGJ^-l|ŝƈ4e! ؘhrczN3vʍѬ/OXykÛX_~-yA%8nsep `{H3Fui(qޖT F\ lip0#X@YFmV;&j! z7ǧgz2JoQ)4ZsշpW& ):}e`!UL of "rDz5{=SK/8mg ;T?pCl\{!9A >7TwY[,DR&[LyƈVP_X$*>2ɋPu%TVv;"Na ن,%@WdiE}o Mir_>@ɳ}zhܥ, t_hZMXazsڤ|O7WO$}t΂׳]J#cv5KV{dtkءWO3ٱ0naE#ll1;/q\VJM_ܧ*%_{ .` 7^1Hn'qguMC Wg;c5혧HNO"Gui84Z''PB n~/åy!#d_R^Y|fgntU`zr\O1ATiT6o^vQЛڗ2ޝj-cs]|O_W.U15TX!gN<ϵ?.S-8g)9JhSnwShtmZ]yfyf.$䍋hLkUF=jR rDt6mw0v+/ݺ7.E2k&e[)9S:FS`i-ڭ y"Z$SSj7bbe3t:iEtg━ݝ pə"|[7e']*į|q(, ຂiU)&F(5Oer^7+#ϭN Yx4^ę?dAxYRf}9ώȘń c~,Lx>j㝧|K&in.\^xUb*)~=52tdkWPw1$trsWe*GWޞ #eUS^)RWFG齍VX Mm\kY}P E78ז'x߼}{ǖ^OG__[8XEn+ yt6}o.9ZTY4`<~]_)Mj>:Fh@Zz$]?":ܣQ6&5EN CbϓWJ̵#:y(#{ImʧSiTCV"8JOsy 5`xL!\QF|mjR)M骶⾨#mdm19clfLǗԉ53ş c0"]{ǝ${! =d$m^Z)(JCأE5^U )2sBh!__ 9r8n9K `(8Ml/Ϥ,arӒXoN)J%Ȟߣ2S+ Bţ-V/i`t_fcP*` pSJ)YN+CTz?/Syj_n'7w9֍o>}Ƴ7.<:b _h>_X뛱p>'4^1ZJ5fZpJhLأ[!Na4:Ui 9LJ76z25Ma Qτn& O3XXW~wys4\iM~|ɿ?Rwd2[ϳtpzyUBDD6ROӔшQIUSv4eNSv4eNف/juWJ ז8njƤe\P*FqIor^-wb21Jq2$'HJ)dJ)dL(i4[^x+o*WV^kq΂ֱ"l.HKY2`/94BQeOt:Mt:m޻68k!h2TXǢ>(SJ_p&i&hkxY.n/HK] p Hi!F5_NnNn<[07X5$΀ȈNk]SX ' ]N]@g:4ZN(p?,Mjj+3_6B7жvH9LIIMIAIʎ&88ׄ/Js7_b<Ъ)T27(D< GFH]hV!)Ua%WX`qhj+JI笄u^—W7wֽF)P|p,vXc$U| ?7O_ߜN$;S^K ]JV#-r8f6(+Ș#I*_.ta-('aUD˾}`z9I`C0e0CBCL]\LV\痱 IEr@Ya XHѭlI]EN)kXkFd!c`_hfF ѫ vP0(TLVo(S%ZKǨ$%r /&>X"L~9Pm&A (l(3P!% D8!)#jVWv&Y`P`[s)Is|-/RX,Ԅٖ5~9Kzjaϋ[SGq30ɬM`̟~A.Ο~~R{`27;G?]8x=<#<F~zsd'S<7BgdŬR?>Mmn^ SFOiE|mw>ø!$XG6-'$*dJ [b)-rL 7EMO[ :ޖpl%$Z0't%Aqђr,+!kǼ IƆ/%`DqUC$N :~Pp'm(F-{aƕ6J!aE0q\ElJg!KTA09ihgJZ Sa$ \Zn"ߝ-Ө*BwϱcLH-BT9e ڀ_.LSAKgia,OTb”bLA$ v[ r07 Y]Y +SR[RJf2&,S2)Ƥ4ʹcrzǘ4bpaV#r_'7SzqG!cV5>Qx5lWcuŇۛV5k |^何p _B,z!mZmC\J:@N,=I7҂P%\Orf=sB߻rN 3c(=?noWЂѻǏWo?Gx2BAd5) 7XP)TR69֌<6?xln6FQ̨$5wuFp?lnF.Fe !.ja\z@f@Uޙ'Mgga ȷMp^Q[9?DvQ-Q{i=OK~-ηFWCOVXS{djG3:-Ha aE)K-ieߕea'0RJ \aDBs!8Icz)2ž($RNɽKbd`WWvXлU2 #sG]ghDPTv=6buFx95uŠtI+:/R4,`:Qb?L2:^a:躢pItk@=  >H֪;Ȼ10yQpȃ   Ԥ~nw}n8 zs( Q;MaͯABt++AO!BpwESveAaCH఻t}tӝO>]Ed)eBΟinF/?wRfDtCcttKhs3r1UqNwH"3Y9N2ZG>3(=kzd6!!P'gBUHTTNOZGtyn8_Ev!($5#%&.sa=LC.{\Mr +O]JdˏiԌ~!B|EpA6ܻ)D3#z(ꖽ(MkɽK|>U +u?bwď涄Uz:vO!1׳^=0,x0n܋q͍3?W3=}fnj0SVlX{bLTv\P$\5{w_}\1ew * V4Ui+EZiK)RGV +.\RBof}{7kR CEtzo7` 3|1A,&($bXee400߁5EaXStJ2I9j=KbF@y{yq-;!PMϽScAMKeWfkOAVmJ\;!t[i\GMU13֌h\Q{UyrN/+Z•hKKJPea"0Jah6;f(%Upea0/•@aEsIE YUC9;R!k59%0+<"m$ܖU% Tkʪq-*AܚKquUa ucypt4;?ʲx1W6oijKsKZt}O͵e^A4<\Scva#o卻W'7>wLN&Wooҡu8JW{tj~tڡHV"/:c 3qESUO]ig(REN4PPV HJz:EV$E%^Dd~D C!u j`셰\T5WCcLd`f@%3YUkܷUu}tkDEy;v1BޭbBܕ9x o>]Grޕ;<1xN/+q nEBdl+EXYFK_$Fkf]-x0qḭ./IEqE펫k1ɑ}?ݱzۇ[A4mxǗC!LVK8n)DqdbH:iTH&ZM TvAz}Wph |tHg*COJ#Io3[e{ -ko?Wxz=%!D݃FYP{k"m[D{#qÞ e(~a z; bm}׵j/O<EO!oR}y6UVōb"Ɩ75a=*X ]~F.n|lq9qLh^1eOwNis32ɄG1EM%F}Hj( 2h^Vpc#Y v[C:P`,/ԛ{y^^5[Aeݘ]=KWnq ծ]Jlmٴް# GZ3wY۪tk̀I[s2tMp4CrqV69kv쬵kKwgֹFΚT!bn﨤BfJmpBIvG$#"?W+ym]+R2&g >H~I`Wh4̿wt? 
7 n'x/} .╻(2_hڔډ:C4FJ9#)fE?U^{~Oߟ3^?_.f}vY1EF^dA2}S#7{[in2s4V$lW3?JBZQd9!y 91K`*ki1 zPNyxIkj )l(o* rlKʨ%Ӓ!tiNJa>gَ7/Ly3o5dYi7fFA !`\(3d[մds2?vN6ȋF RH[ *]h(DӓкG*_תNqLp&ёdN%F-E.!P:V/{&ˤnś "Cl.Uݧ|+=6|OB xyNɸ(,|e.Vk!x bZ-n\2C ;UA58;܇V] rB S#w@GA*Ke 8+(SBr8.%"akU$SEZMbD4J_n'V$%vv ܎XkPY# [J(]׉5hFB"Hn䓩+v%mx0v.LlOF[[cp!f aTYiKXQT CؤZqR ƺA'huS,`'2K,D5W^R k"vhЗ _J&=F |`Lg 9[g:$p;D<v`P8@re 7v]*> (\]wXL7߿%o<|U`Q<_8W4DD y*C__(<_,ךx?Bpsa3@)35d)5Xg3>X%h)x{wr9M *.{ U\!w:z4L"TN6L;&]NA6 j=#984`ȔZ) spj!aHJj,>.D;h"0~Eb:.xPu>⬇k]:L>;湡a(a2O0 T9D )֞983dQK2M8ir-'LQ 3Q<}ZRZ-Z0ܟ7WZc_SDZKb%x3W\ON?4[n<凹FSwC{7w{I0]]z2+9Ea*gmrg\xsQ s6#Z}ьPejQOl\HĚ*Q'$rA`zc0Hm`#uD{I,u*0) b ` =o4jd=V .ъ-<0Lr 0_9D]$(JX!1@p 2nЀejnP}5t®a~·y6\cFjQq\:2a22L0S f0aEFw`3XsD3͸D$aHVV a큡Dd!M! r\Aa,O ؏ =σ{8Y'ȈV,7X ~ 8X^2UqA_6VPjbniö:=b{S1:(Qv0 #p aP V N=Ai Haͨƕf \m{V=Tݵ=0c۔~k<r1$_=ibo&Lbx^8u|mr0x>8x^zK G/W׃.a r?3ԅ-bC,Lfk"@~r, NoԺ.JES [ls&lF~ GtBnIݠ` E }$A@Թ H⛱NdvT_FUrKJ r9P%wӡnJ:"߮>93]~T4R_ SU#3ƺS#yE&`i*'„1L홏)ˮJv7%#Oc29X`; M$Бǟa&P?E/0a!9:Q?2~eK1qvuW̝Rt7yڤ_xxqLyOts={dYt48;O#E=H_(ǐ+qۥcOю֎^uʱW?V7vKgڄvGsmm_}mIA[' G}jzxqkjiBT 51UY՝+ZԹAQNHի>?}8qBwNn\hv3^+n@!VZAd:YjӝDF!|±ap1Wqi_ ]|ƶ;aq3VchС$V}R.Cȹ5uj[*ٕ֓ߝN^~~RIR8KhH|R>[~.krV2u|!tV+},~~ypMVW;M$Urg IN.%eZ3ӥyUIhjڨ Ҵ4-L4lB $,q ÚafŮj nu>xA~w|䊵dK2!eooV)8w$  Ų7TSw7YU'&znX]eN9r pՆcCM4ɦwbc:ηtZ Zn,1ޭ y&dS\~߻iwK tRQǻqtZZmۻGz&,䙛hM֫:?nļwFA餾w; wEz&,䙛6%t+3vLl~7[\lcXZkP0Ň ;3x u86+Umfg]^/* "MtHq/Dw炙}]OR‘w_=Q? J".$s"?N[NiVd'JG`U ?Z .FR Vh[D{PӶ,%~uZB%x 0ɓ,[Hv+;?}x- /溴 /.B0/C*%n֗ F)Ѵ{x84)-(U>HGG "J#! .9~/90:NOb\(E:M1Ofg4S33dr$LSF-`1.~ 췪&##.(Ԋ0(QBZYb 272 - Z+0d:ݡY5۹nt1}7d)\R6߿%o<|% 1D,GisG x/7W|ay10+z{_XL=zC67>|A# 2ҧf~DTZ0B$Ih_n U<ЗЊ䠃#M Xfi?~Xem \M"爀s͹!dL|sWi<򃒇0O':u;=Ėp.:Il>ډ;z R+iw{$jkt'q+H'QV \PZUju ݩ u4 kBg:UwzkU- dTxRy.ly„#P(^)z(`/i&8DJ,ĄqںZ/8]hf2-ђdTI1RI1Zdȸ0p}+U@ PaiK2MZl~{ɭKj\iRD hf%}^}-( }hIy1{8 Y}>p?mP*.ƏBՃJˁ3vPIA0 2K8nȕtL7ZѶcU tLjP9j%Hr$͹lNԒYlM.v{6# ª@'\fQPPτj N=C2P3JqiçY~u5XMGWf5CJ/d-RǷY/J'Rq>Q9Ss4ý!y> K\ɾ<# ZtD}q.wQqA. rՃJBo=nɚq= %:=6Ʋ>e s@&iBiggn#T8MGa i$ϽTgV+#m5BzMX^WU.>}c٪?ZW[ikcGsmm_}mqUw;bphԨĝ14ԉb94SYs40HJC:{е.D@ptV8PPaF:ac„V[/mn/w[WSl.d/_կ6䈲{C5I3C%*d7 @r;q UWLCzPLP^}gQQy]SB U]Z-}k-d4ⶭdÅގι S-D+8Y&tq$ERII>=;Y%mt/_/4{>. 
ջt'f~f/ٷ?tP2xLşҿףɩ jƞ.BHY9+Eu?3vhEpeL,K_J V.S0{.dQoʪvk^IأvAtv۟J$䅋h0~zUF[W jDm>c$eܝiW*[Et{>7iF\hy *Ql=ۑU7WooYA@OW MN,:@G.fhz{}5jmۉ1r/iO4:(͞ӝ8!# +#P^w [o,:׈bϳ2-X}1qq?Oѓ.=E6jqj n3P9M[QK_\2bۦ.2fRS)s8T{Mk][SEV7l_> =#P1Wۯ~m"L-ë _sy7{ʠR7K-P׷S>41^vo^Sb fG!vkhޡMWf xP 4^GcLCm/֤#X-~Cެ-OK#ȕq7Cwqb j\ mɞ%kF9{R/\3tcmŐ7-*="jވ[-fuºYaݬɺf.4"#O᜹cJMgd69%cuɠQeuNݺN(#"+/D'Wwl߼qR Lߝ^?n~Ud*|xyS(ף,C J"U~WJԩέ8(H+KHfS@s,9 :qY/:d6Jc=yJuCK-<0)wʈd\B.c}PbILL+n V Z-}j9>{-K-S&Rb6HF{-"[DŽ'+p-S9%4;vķh e;znnVP8L6~ІH vi P xi>ن`% i-,O FıԈN1nJLi}rj QjOsE/P9Ov?0p/*mJ=U9p|2*֧M7/L~[߂.]qMz;Bw4blﱽ%ߍTOnI5yYN+h_YSpGIG _ _ͻ_T=v9'ƛ0K ^J"n7sR?_I0`W'94ea59TQɀM{xG!uZL& i9X0ػ}GC']z~Hxҵ+:S,nZLBh_H%f 6}~"h0݂P6`SaBEfPIG;f%cÎ7ǘ<'R0G+?ͯ/.~_U0ǁCݑ~aߕ_"p4V5,GxoXX\?ze;9+s8Ԭll2}&AlgȍQ=Ğ[8u Yp z+PrWhwr{::n˔O^Ntj)cOpOnl`Em0hѥ=yjmwEXͦ G[&mEh%L_~G_Z)ն4>4;Sɘb6uAvp[ؒn(9*aTJ5!Y3C#k ϡg|;fƯL˫R 3<.[M-/<+{&ZTqf(Xmlx]r%dQ-=ZO''WCߕ`J?^e.0rf9]> {{9WZHWhEip= 9:m`àfz;Wnu2Bܻ))5|Vi17;hp=vTؑo{tlln2E )2kR+iQ(;zC8Kq(7øFZuz:-mvכ2dJYOM߾^Otq}++,%Q|o'߼9y}b|hiUʜ N0#;%hϻ~i+oʵ'ъ1\QV @p'yˀH;eA*$ɶO |"=|~RVmO#p#U}|MgqM-BriG1 d2h]d>%Ѯ %NZqVn^Ϯ=IBeW\ƛwOv~ N5=65OԂL&_|~8hN8z`zpbGF_$_ͯok?sI)AX~j4SLj'n~ݣS!,.gNh:\`O4Vy QKO&1o]3aP[uY)w~|HFt#'_;rO=)b-!-~& [U|:җ_4eR:כ_﷕gRTJJɳf41@#kurC3i i & -@#Z%b3rڋi09ZxP>蹊JXTYuNfUɷ`$`XapWΎV2 ;ZG,0w4NFwd@h[KQ";O@/#ν<Q[pF1PZyJƸ ?&&80 \3>b5-k 1%v*q 9Z흗hA(s)|fA(og֘ˑD',<,(i^ xwq|H@7gUN(cGW$'$ =G##8iJ\N߽W":232Eh}lUޜ73w<[Y{]{]u52V1 ァ}*,mzLJҾ CsFJ0#զ[feǪt V]S`eY3p'vuI|AI7H7O&ӫ0]0zO|2HbdiXIԶFhH.8T;)HdĢaoc 'F]Z[tdcFyYdCe>j!U yE6h8*pd-28^ZD76/`#Al|HX'=.5\jD @Ni~:|ڰ o2˟6Π pق=zƨQڕXAڞDʹu2ĠЅ<$;-{AVh Ŭ?b%! XkQ͎:+XN9FcYŤrLnQ<1L >3D&Gڲ5䉑o@IRTV4'$_q䑟ZNk;x *VIC)\nV:= "fD?{ƍ K/v)e{Ru\r f0ҒARxC̍W"AF4f>ntg1A)40(kšjkb ߈d3L`5Kmb{DGǛYôuX74UEh-ۙdx=lTabֲ3액3zJZ5 kg{ƁUqNYf2 PVU}3#8e i>&5\0aef~ad; Fǔa.^+3pQme tԧ?jx3s%flhl28ekT+N?M[^|NM ǿ%!1ekHohcА^(7K PeޢH7ijDdu.:(ws7]6í˗*Y %qw>~2nvW]PΣ=nGU  dMq3MnE{/s!_:q^Vu ."(H?6iSY%DNeM@Az*xP:z2IܼxT=aVRТZM_v{;$ͽ[KpSIlGKM=.dB"ϝJfB-9aA&Fċ_Sӧ9$c٫4.EjK}^7nG`|3qPSre1Ǽ4->Yao,\ v@˫0 Y37$Rßv%im-Imw16KҙwK_Dz&,䙛hM_'/|#A[rD /sB#AUWM=q ;!ASPEP^Ev.%x~Ozʭö8ZM}da(_lMMK77?Cp)hQ1noԠ#Td$a]}7q6H1\Vuv-buzm|)dj>%QڗU6˜=s$4]/ŅM{/RN( C)mCZ.72n`.VHFq@.f8[ *je>vt3zpj>f/.եX5};ubZTd91N̠3!" 
(xׯ.bJ>޽FJ/^n^tF5R댂J>|޹n F[[\uTzX*@?SE^˥/yzQ.j*h{ϊ)t< A|hC Oy2p%moʻr>7nP{[4܇65ҧchcȐ+ڵ o0E[1<6w OVqqñ;K11QntC5^j/O+jA`4m.7bBzSCNRs]hEK'0q Q25Y3e%8)y[I!;FFu^ 9 B jEeh9ӹPڜ2UZ`؂:ξL1[o5$$:]Nkh;BKjMn^ж2*{8"u_"Ҫ>$xCIyzsm߇paY̥`SR[.lVKK˜Yaq,$0KLURѷv*醴 %N[Jy@Itp"ItF7ԥ<bIB=H_7Bt$uߣ htd(+I1qRMV'q`Oܯml>)l?[BtA]2›A 8u'3vc!0}Iܾ}e0ek%PJ(5dD0ZR-ņhmfrZKmNN)NmUL?؄i!NXɈT: ]Nt>7{g?<04W13 Jr\Hi3tL*c*;ϫ?|ax7E opsp_Wsr+x~sqaWr,M_ףkvإtwIֿ KV*}=>3A\dL9qEd;I3rG7"Bˁ0F;DL%HGmjqHXGu3!7*O/Nri(Ȗoc 0cft /F3JisGv skhKKŰCuk:^IeF;]Yv֔{Unj9yWÄ8L9'ty[%OGyCEy:zE }MaXJevP.ZKL-A59-beVQVlv*mV%.sP~8Wmm9sV˜:b.#92sFƺ<ύ̴,9' f̀1M;{ N~MJk~eKO{UQ֖Tâ^|!$kQLh<D1r^%')-vai{-*#V8&2KeR?*) Y^LP A*J?ſƂct|tVOXHy۱C+Dl_ GhT&BM.1+Dp@0,k 8>J< qWriF 1~wwyd4caK30)sʁ[o*RENaL M9mLafn6,KRM*9]~$ָbR2Δ_1Lø*8*+qdc8 FZs9@8EL4@E2H9QhY.~nT 8hc]!a͆wN0DE Q1W.Z}tǦԣI/ڢ׮ySS):,ttt|s/FCI7u4 2{nGݐNMwk:首@HRw$P!ِTXVΕY%HUpC1+BB,.ZEtD 61*O?t35p,`iI*Y`䣀5!U;1m<״MX37$Ï1v&ͮ[*1Fc܈K#]Hք!v+Or2wIv5Nj̉}&Pk$& RZXdE[*ƅL"WnlN@.q[v n0ƼM DWIVϯX|pm:xcӒq~!D頺Zpq7@ HO>i#EGn_졭/A𩔃gmΞ2OpU& #n$1C}{ 0R^ۜ%qz;_ ee/(KmZ=%)o";Ήedw1]*41WM3LLpVUGJx ֲ2!xFRr:^-@S ,`ri(ͲL+[XѕJyf@Y22(lNMl ω&F,}p&O<1:l3Ŧl#G[YTj+T_?{O1JLǮоM/0E,X Gs*m]Uߔak]w 0:upAhJw>kFx9ۗ=.` %pPp/Q0i't 4CdMOZ&9pH6l-gH- LMr[FEuJLHjcjT1kr:p uay?jQ銅Uš *FM'kr4h-T& XߴlȾgE+:+x^эut#DQqG56BV[QS ZJi:1As6ku>J1A Uq3LXX0~%n"*ϩٴӱD6M4˦ݲRCX)<+N/-Kӓ0f:{xٳw1A$H0ow$9\9?VQQeLzkb4)fw9RQw/u9QR-O jdFk-QrW46aa)(`2'Ox˄Q8Z(V\ Rg ),I9߈xb)qͱ)e`wA-}\&Tr=w~TVB޸fD|e2H5(Rhnh&ss5.>[:i+HDa2{DFhxƲ 6T݅w][+x|8Xq)gs-=.;&Q}ūFgr`z[zq Q("tZ;G\d+Ț׆L6Vw7^=gBJ鮩 e2:>@g,-Ć-q*0yƪA7jN"}6Ҭm>+h5^k㋏uyȊP]1FStkT3"5!Eh*O9Wa=I#>sxF&=_0L_K p`U "cJO=}mV/@ B-XiNMLCtK=ң=ZO|r_^_ Q.pdP}mNpd}IiaUHTN)!ktӕbוvUZG )ṱ#xv"_~i*ܮ"&0 b%c̜Y A' $a1Vs0l|x0>@gTeET53 AL@Tę&%Hv ZZmF+'C:'-hkiVKak.,Jn U&+gaFO2 8R ܇K ИVgbeaeka+P1kiYCh= )딯$AA59C[I }|`G⡏|u3K4hFÆ>bcM9(̡2c*A5 ^h#DXI*Hl)0v4<ѳBBm[ڵnSq o+R7(&2^D' 5 EmH bIoρ@i{٤Ҋ`Dn|p/Yfo I; }ߝv~˷_{s?\㏜] 쒓`Տ-{o߂l^ n_:=+ h 0\TWZA7us{LyU D=v͗^K¶8KN8tqQ yqS 8Pƶ(>HP!r&@_JQP ¾)ߺݤ$5P}pep'?/gқ'o`PkzW_/։+`-B6Sqٹjckwwo1LhuAYKmH^RbpV>cڂtNM\iJX76DXo8ƿ G3b36&N[9K-^ld3Jm0w( ނhQ6^HG# 9Q6TmCa,AL"kn7RVXBgT ^)oa#ji7*T!ʓG)<@UsB1T3S}%UƒNK\Z n%sv:,Gw٧gE9~y)\ĎZ1fU>ޗ*1fV}B! 
` oc*$p/(fwGtB,ȼǩ=蛶VPRM<t \ 3NxB@4G#*s闋>n{k gwrϏ-bk[Z/n]Ǎm4s/KZڧ>)M#žB2y'wﯚ:W*ZkRbv*E@嶏%C,-qh'#p4m͆*^o#e5;ڌn}V7>tÓ`3)m/ cLuUFlVqF ?f@~60ǟI݃gju6|3pRqWLQI39U*D;XYyp:mRۦU[)l 9avj<9ȸZ֖ T& adYT/m#CV{LHk9PI!k栲 k3DA~B8T1P0Uw/^zxd"dR uNh9[vl/xPeAu A;+1ufBV` ҳ=Ϣ$^::|ρJpu>PebyP <?p:˴LU{%DØ!֨l{MXaeF/dV65*<`cTj$a*Y%SUD../V izD֠bq'^eM$ْ5/)k"1K`:8|j; dd yDA' 辫9F`yƚ!&#UF*F9 ։n$7_3"^MICᖫ4,ϓy} -"ilKtŞ)'sJѧeaڊDC[fޫ+fӣw"mpzSJڽha y(wgW`(hTdzUa]9wx$_$1@XAvI dD{>b h;3[߳B8_me rUc-G4hjSJ:M*`EeB1; "n-b Xs\]:.}a%R!omJ0^a sּt>8KTDJd:(i;Ǿ,)A  |ه2Ҍ#zQUDKT^ZB EPAD NRK( u+bv^Ҕ kܻ@DM\׊fV.Z%|5DŕSI~B3ix}JH1GB^>cAR1?g#k%@Rd*`f/u;rF2遥ZT/7B(jFWˊ\qsnиS4foem@c6R8^qN/dE?, s\RNz|FğJ}ׯq~Ƙ;epa(tk+"< -%s+_2 +x((W^po.)Ss1drNSJ[B޸fٔy7$ A>cw[/itxjOJX7,ᏼ۳Kn$--tm1Oe3K X7B;n$[.)!mΠSEy|NDVB޸)BS|BZҩ!)w _^^20fUw8 ֗1v4&aT-t\ a2g:+kmvz)TƉaI¿.!DeF.r;ʅF=tG2l!VX]@巛3K-@z!;9xwWYRʗtq*NR4LCBTWPqui ;]aҚǐ۳nJ:ddsZJK99ACXWXa2,:ؚ1p dnpVj3m^n@χ$2vI҈.J60u9C^ɺvMc\8eA2QjVxJHjVq*g i]5l-5qoS2S$Hj>-7#5)&B2kiBpT/eCU Y!}<,<0vo?8y}奈.G NXƠMA - ~AB+'J˼}?fs!')g7{s.r̶W=!GbT|/}Lmxt{u~lצ6۞k Jm}ヹ&Suv]|.w旍`ʱFjR*啯*oVj6Uޅ[ }gښ۸_ae+;2UvNm9Wl^R E$l~CJC C ICk\?X_?6QjGՉIșZRgz/ķ,Q;^jZåYKSN87J1uVtON#A+ش")#QExLz4Z EEMPA s_ʸzmdƪtպB$Ɣ)tR^L %eFkvi?BlQ0glН:`Tz#T(-ȤKx 9!".óck³?,M"yR*5JΚ#B2K WaJBr#BZB+^- -P; f͊ 2ae!3’ㅳ#cQBi a \pz9#kw6*>+W۳U~ akHX@L%f2`l珂v%!0CPpuK _QxKt~m3)!]w2c՟Eaulp}Zgu5BXD)2wǴ+~9jԵ}e 5YF XJg r-SyvCkTk&ӥ*C";VE~Xdsh\ٞ^j{8~%ns5ږW|689Ж>]QQt94r{ރ.q+(DGͧIDwU}4v#O4*:\#gIO'"Z+pIiǛ㶃N~ M|x5¬KOs|"G,HmPp% %dPa‚%+rƅư)CD cBC{zFXR>74:C: .Bkx0 f1+\#RВXu6Ue ?.3"Tw_== ]J$>~MJXzwߟUMW?=tx} ,:޿!37gbX'[TK~0#N :DA .pnCj*pK] zvmT)g!-s2Y7`zx T>w𽻹䠀Uzv4onf+RBFt.ppsǁ$v{l\F}y~=C_TThi!27W7IVT bNjMVyAB+Fnm>D)99"1GF^Ŷ4C?ݓPsB[D6$ҕ:w{wY!ba=OO? (f'kmfΥAkF+Z>z||5. !2*8WFMP@TJ2H~3Y>Q`^EJՒ ǭ i hA1ne\Q>ބC9t}{'9?3KƆëQW@VO9SJw!g:Ž ?I`LTVN$KSۣZȩZqeG*{ϊҌecn 4(^T >in{EhsND{ۂHз5 {#b2J2o&?Wu'o=l\ݛEDi6D p'3 -} W QuGaN@~+G*JwZz6DܗhOnLh*|Y@3^9mʋ`m9djY9l*b`4AT| BMCHS%GGp(;CN˺T t<1ɠ}Dz]AW@`⬳kgI3W¸7LP, u}FОMiʕv --n6mw>49dL)#p;x4 -} rȊS4'x@N8V.o:*Z^%_ppVO(E@;(qV]ŋH[i۪'zcׇЖ&ZGM|w-֋)N $d_*M }*RT >( ==ٮ'چ=OVa~΄=ܻk[JXJ? U ,I T|NkHz ŒAڢKDJg ;C`K/Wl}%-^;Gñ.&wm!_._v^ț7vt3T0VyJ[٫/fj2V] DUO_E<;GT3aEڧ2ijm:8ܮ4(H"%i&0=o S<3y k5()sg mZs͘aS+6B@H8W#JuKMK1ѝ5]h8]X#s(L97=P ЉM0Ŋ#ŠpRaűUk#(#iiF(@Դ[ӂqƕFbU(2RF\U.AМȹrːe+GyXax V /sH.ܘ BHvY*BdZ)5*zѕEEo35L\}*x43.Wxe+j!KS1q4o%[ji%0! /SB6IfN,ȗ1 *^Q~= *~R;E ףMgCenOob-9A$P&^hS]; @]gf&-hJ(r`M.6Wht7DArR~Ju(Ow8iuᦺ: ON_j"/ڲY8f5qΣHRFڅј)R rE]jYP:kY٪D볻'o&Oԟ$9?@=u˭2ajHtAf)E㷼^9>#yF@ˉ,/p4$הbYd[.Qlk&YTH4,& "5iU5 j\2d(k-?0k`+Dqut;4{AK }Ծ\뷓?ğ#"C~|;j#xnu5x_^ pUKAШӿ]PaSхoꢳ;jk;,TtiD{/UNnՓ}V,ߞS 7ki^N.avElwipUWN@cB%&W8yih . ?`WB*YOt;X,TC-c,1{Y 00V, _BðdHž'cͬ/|XlMߏ0.È #:V-{ m bRV:=:-NX &R deH%χ bn6)kqԎ.FHC ,S7Y}|NjOqV2!5%1>f-pMj#õZ 6c\ɒo\Zsd*q׻UbZF""_^#"4U-UTͮgXC2nbcn"BRQi k-DaֶR$G fa2!b##8?H K\6i3Ca99+/W "pSАpAYK-}| rȱh[iʣ GX 5=J'YǪ D5{ /vg P6D/ـ+_ޞYi3q~݌k.vp"qӄ I+9d‽ _6IĈ-{%yC+dzgfjVU]"?Vf,ګZzv PB, *3–@6DF iC]>ġKLGq"-tEx9JTTvPĒb׶^_Р5qk TDZ\q$ feせX\I aZ#51TMC^\K+ #(V  Sx#`Ƙ5^\<ΐÃじ [ٝ;Sk%kGK>RPWC%>a >GW2Pvo $0ށIxKJ=n^W]L!"?++]7*P*y9ڥ{~w(yϲ-?.߀r>^l_Qh(? u.M[M rR<79 }lwzba½| ^%DI1-kȄLh"1n+h?M4sهjNօ_,Ydôe~NhdO#SZ%\wV Ӭ t"ZJLZ !yCz(dVNUܐI\J/h. b;d `ӈZdPl(,B8ޔД0%4[ xrВq"Me$4:s$#)ڎ0G2C8t{AԾv47vRQr%ȵ:JLd7HETt7&dBTPα+ƸDHM}0Xa{~*$()]jE"xz C7Q4`aΆO&NG+Ņe WIuHl{G30;dEUaH.:SdvFPa| ʙ~:T+WZfbz%]JE_F]kV_RAՐ"{r wyv@U$CRD˼5(m3x [u5 o5G)FQ#I68&>I/HibghLh huƫ2cZ_o|p)>/;+ӇRovMvk|e?ñćcWCptU_rTv[-tM9Py>KG鄡M |BxJKo(=|)JXq\(n4#f54Z{&*gKM[] `1glei "q,#NМ k@RC0%-HEDs̏ʃ" "80]o:`}R\]aYD*ɤ@T 茔v"\QK<֟j7|s[0Y hrnV6ӱl0?3ЂRb2 CL%QMfmYKDǬ%]kyI(~%R'lW.[6\׺Hf7GLMs  sfiXX</އQ/拇j?(E >{Y8;Ƙ+Cr4]Ǐ3ẑ~'m/li ʢEWU yU8ToqJ)y0~%w"DiȑG>}7Sqd'pw? 
var/home/core/zuul-output/logs/kubelet.log
Jan 29 13:15:59 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 29 13:15:59 crc restorecon[4755]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 29 13:15:59 crc restorecon[4755]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 
13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: [identical "not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13" entries, all timestamped Jan 29 13:16:00 crc restorecon[4755], follow for each of the paths grouped below]

Pod b11524ee-3fca-4b1b-9cdf-6da289fdbc7d, under /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/:
  volumes/kubernetes.io~empty-dir/catalog-content/catalog/<name> and <name>/catalog.json, for <name> in: keysight-wap-db, keysight-wap-diagnostics, keysight-wap-logging, keysight-wap-migration, keysight-wap-msg-broker, keysight-wap-notifications, keysight-wap-stats-dashboards, keysight-wap-storage, keysight-wap-test-core, keysight-wap-ui, keysight-websocket-service, kong-gateway-operator, kubearmor-operator-certified, kubecost-operator, kubemq-operator-marketplace, kubeturbo-certified, lenovo-locd-operator, marketplace-games-operator, memcached-operator-ogaye, memory-machine-operator, model-builder-for-vision-certified, mongodb-atlas-kubernetes, mongodb-enterprise, netapp-spark-operator, netscaler-adm-agent-operator, netscaler-operator, neuvector-certified-operator, nexus-repository-ha-operator-certified, nginx-ingress-operator, pcc-operator, nim-operator-certified, nxiq-operator-certified, nxrm-operator-certified, odigos-operator, open-liberty-certified, openshiftartifactoryha-operator, openshiftxray-operator, operator-certification-operator, ovms-operator, pachyderm-operator, pmem-csi-operator-os, portworx-certified, prometurbo-certified, pubsubplus-eventbroker-operator, redis-enterprise-operator-cert, runtime-component-operator-certified, runtime-fabric-operator, sanstoragecsi-operator-bundle, silicom-sts-operator, smilecdr-operator, sriov-fec, stackable-commons-operator, stackable-zookeeper-operator, t8c-certified, t8c-tsc-client-certified, tawon-operator, tigera-operator, timemachine-operator, vault-secrets-operator, vcp-operator, webotx-operator, xcrypt-operator, zabbix-operator-certified
  volumes/kubernetes.io~empty-dir/catalog-content/cache, cache/pogreb.v1, pogreb.v1/db, db/00000-1.psg, db/00000-1.psg.pmt, db/db.pmt, db/index.pmt, db/main.pix, db/overflow.pix, pogreb.v1/digest
  volumes/kubernetes.io~empty-dir/utilities, utilities/copy-content
  etc-hosts
  containers/extract-utilities/63709497, 63709497/d966b7fd sibling entries d966b7fd and f5773757; containers/extract-content/81c9edb9, 57bf57ee, 86f5e6aa; containers/registry-server/0aabe31d, d2af85c2, 09d157d9

Pod 5225d0e4-402f-4861-b410-819f433b1803, under /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/:
  volumes/kubernetes.io~empty-dir/catalog-content; catalog-content/cache, cache/pogreb.v1, pogreb.v1/db, db/00000-1.psg, db/00000-1.psg.pmt, db/db.pmt, db/index.pmt, db/main.pix, db/overflow.pix, pogreb.v1/digest; catalog-content/catalog
  volumes/kubernetes.io~empty-dir/catalog-content/catalog/<name> and <name>/catalog.json, for <name> in: 3scale-community-operator, ack-acm-controller, ack-acmpca-controller, ack-apigateway-controller, ack-apigatewayv2-controller, ack-applicationautoscaling-controller, ack-athena-controller, ack-cloudfront-controller, ack-cloudtrail-controller, ack-cloudwatch-controller, ack-cloudwatchlogs-controller, ack-documentdb-controller, ack-dynamodb-controller, ack-ec2-controller, ack-ecr-controller, ack-ecs-controller, ack-efs-controller, ack-eks-controller, ack-elasticache-controller, ack-elbv2-controller, ack-emrcontainers-controller, ack-eventbridge-controller, ack-iam-controller, ack-kafka-controller, ack-keyspaces-controller, ack-kinesis-controller, ack-kms-controller, ack-lambda-controller, ack-memorydb-controller, ack-mq-controller, ack-networkfirewall-controller, ack-opensearchservice-controller, ack-organizations-controller, ack-pipes-controller, ack-prometheusservice-controller, ack-rds-controller, ack-recyclebin-controller, ack-route53-controller, ack-route53resolver-controller, ack-s3-controller, ack-sagemaker-controller, ack-secretsmanager-controller, ack-ses-controller, ack-sfn-controller, ack-sns-controller, ack-sqs-controller, ack-ssm-controller, ack-wafv2-controller, aerospike-kubernetes-operator, airflow-helm-operator, alloydb-omni-operator, alvearie-imaging-ingestion, amd-gpu-operator, analytics-operator, annotationlab, apicast-community-operator, apicurio-api-controller, apicurio-registry, apicurito, apimatic-kubernetes-operator

Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 
13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:00 crc restorecon[4755]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:00 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 29 13:16:01 crc restorecon[4755]:
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 29 13:16:01 crc restorecon[4755]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
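[Editor's note] Every restorecon entry above has the same fixed shape: timestamp, host, process, a path under /var/lib/kubelet, and the admin-customized SELinux context that was left in place. A minimal Python sketch for tallying those entries, assuming the log has been split one entry per line as above; the file name in the usage comment is hypothetical:

import re
from collections import Counter

# One restorecon entry per line, e.g.:
#   Jan 29 13:16:01 crc restorecon[4755]: /var/lib/... not reset as
#   customized by admin to system_u:object_r:container_file_t:s0:c7,c13
ENTRY = re.compile(
    r"^(?P<ts>\w{3} +\d+ [\d:]+) (?P<host>\S+) restorecon\[\d+\]: "
    r"(?P<path>\S+) not reset as customized by admin to (?P<context>\S+)$"
)

def contexts_by_count(lines):
    """Tally how many paths were left at each admin-customized context."""
    counts = Counter()
    for line in lines:
        m = ENTRY.match(line.strip())
        if m:
            counts[m.group("context")] += 1
    return counts

# Hypothetical usage:
#   with open("kubelet.log") as f:
#       print(contexts_by_count(f).most_common())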
Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 29 13:16:01 crc kubenswrapper[4787]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.728260 4787 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
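[Editor's note] The deprecation warnings above point at the kubelet config file, and each deprecated value here has a counterpart in the KubeletConfiguration API (kubelet.config.k8s.io/v1beta1). A sketch of that migration in Python, with values copied from the FLAG dump later in this log; the exact field names are an assumption taken from the upstream config API, so verify them against the kubelet version in use:

import json

# Deprecated CLI flags restated as KubeletConfiguration fields
# (kubelet.config.k8s.io/v1beta1). Field names are assumed from the
# upstream config API; values are copied from the FLAG dump below.
kubelet_config = {
    "apiVersion": "kubelet.config.k8s.io/v1beta1",
    "kind": "KubeletConfiguration",
    # --container-runtime-endpoint
    "containerRuntimeEndpoint": "/var/run/crio/crio.sock",
    # --volume-plugin-dir
    "volumePluginDir": "/etc/kubernetes/kubelet-plugins/volume/exec",
    # --register-with-taints (node-role.kubernetes.io/master=:NoSchedule)
    "registerWithTaints": [
        {"key": "node-role.kubernetes.io/master", "effect": "NoSchedule"}
    ],
    # --system-reserved
    "systemReserved": {
        "cpu": "200m",
        "ephemeral-storage": "350Mi",
        "memory": "350Mi",
    },
}

# JSON is a subset of YAML, so this output is a plausible body for the
# file named by --config (/etc/kubernetes/kubelet.conf in the FLAG dump).
print(json.dumps(kubelet_config, indent=2))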
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732017 4787 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732039 4787 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732047 4787 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732052 4787 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732059 4787 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732064 4787 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732069 4787 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732075 4787 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732080 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732086 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732092 4787 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732097 4787 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732102 4787 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732109 4787 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732115 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732121 4787 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732127 4787 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732134 4787 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732147 4787 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732153 4787 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732157 4787 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732161 4787 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732165 4787 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732169 4787 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732172 4787 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732176 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732181 4787 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732185 4787 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732189 4787 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732193 4787 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732197 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732201 4787 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732205 4787 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732209 4787 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732212 4787 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732216 4787 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732220 4787 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732225 4787 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732230 4787 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732235 4787 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732239 4787 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732243 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732248 4787 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732251 4787 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732255 4787 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732259 4787 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732264 4787 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732267 4787 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732270 4787 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732274 4787 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732279 4787 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732285 4787 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732289 4787 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732293 4787 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732297 4787 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732300 4787 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732304 4787 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732307 4787 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732311 4787 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732314 4787 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732317 4787 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732321 4787 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732326 4787 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
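[Editor's note] The run of unrecognized-gate warnings above continues just below, and the whole batch is repeated once more later in this log, so it is easier to audit as a deduplicated set. The names appear to be cluster-level (OpenShift) feature gates handed to the kubelet, which only recognizes its own; a sketch for extracting the unique names in first-seen order:

import re

GATE = re.compile(r"unrecognized feature gate: (\S+)")

def unknown_gates(lines):
    """Unique unrecognized feature-gate names, in first-seen order,
    collapsing the repeated warning batches."""
    seen = {}
    for line in lines:
        m = GATE.search(line)
        if m:
            seen.setdefault(m.group(1))  # dict preserves insertion order
    return list(seen)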
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732331 4787 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732334 4787 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732338 4787 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732341 4787 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732345 4787 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732349 4787 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732352 4787 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.732356 4787 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733235 4787 flags.go:64] FLAG: --address="0.0.0.0"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733253 4787 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733266 4787 flags.go:64] FLAG: --anonymous-auth="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733274 4787 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733280 4787 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733285 4787 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733292 4787 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733298 4787 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733303 4787 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733307 4787 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733311 4787 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733315 4787 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733320 4787 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733324 4787 flags.go:64] FLAG: --cgroup-root=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733328 4787 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733332 4787 flags.go:64] FLAG: --client-ca-file=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733337 4787 flags.go:64] FLAG: --cloud-config=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733341 4787 flags.go:64] FLAG: --cloud-provider=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733345 4787 flags.go:64] FLAG: --cluster-dns="[]"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733351 4787 flags.go:64] FLAG: --cluster-domain=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733355 4787 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733359 4787 flags.go:64] FLAG: --config-dir=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733363 4787 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733368 4787 flags.go:64] FLAG: --container-log-max-files="5"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733374 4787 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733378 4787 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733382 4787 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733387 4787 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733391 4787 flags.go:64] FLAG: --contention-profiling="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733395 4787 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733400 4787 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733405 4787 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733409 4787 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733416 4787 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733420 4787 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733425 4787 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733429 4787 flags.go:64] FLAG: --enable-load-reader="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733434 4787 flags.go:64] FLAG: --enable-server="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733439 4787 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733446 4787 flags.go:64] FLAG: --event-burst="100"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733467 4787 flags.go:64] FLAG: --event-qps="50"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733472 4787 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733476 4787 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733481 4787 flags.go:64] FLAG: --eviction-hard=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733487 4787 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733491 4787 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733496 4787 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733501 4787 flags.go:64] FLAG: --eviction-soft=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733506 4787 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733510 4787 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733515 4787 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733521 4787 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733525 4787 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733530 4787 flags.go:64] FLAG: --fail-swap-on="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733534 4787 flags.go:64] FLAG: --feature-gates=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733540 4787 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733544 4787 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733548 4787 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733553 4787 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733557 4787 flags.go:64] FLAG: --healthz-port="10248"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733561 4787 flags.go:64] FLAG: --help="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733565 4787 flags.go:64] FLAG: --hostname-override=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733570 4787 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733574 4787 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733579 4787 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733583 4787 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733587 4787 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733591 4787 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733595 4787 flags.go:64] FLAG: --image-service-endpoint=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733599 4787 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733603 4787 flags.go:64] FLAG: --kube-api-burst="100"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733607 4787 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733611 4787 flags.go:64] FLAG: --kube-api-qps="50"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733615 4787 flags.go:64] FLAG: --kube-reserved=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733620 4787 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733624 4787 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733629 4787 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733633 4787 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733638 4787 flags.go:64] FLAG: --lock-file=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733642 4787 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733646 4787 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733650 4787 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733657 4787 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733661 4787 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733665 4787 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733671 4787 flags.go:64] FLAG: --logging-format="text"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733675 4787 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733680 4787 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733684 4787 flags.go:64] FLAG: --manifest-url=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733688 4787 flags.go:64] FLAG: --manifest-url-header=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733694 4787 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733699 4787 flags.go:64] FLAG: --max-open-files="1000000"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733704 4787 flags.go:64] FLAG: --max-pods="110"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733709 4787 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733713 4787 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733718 4787 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733723 4787 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733727 4787 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733732 4787 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733736 4787 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733748 4787 flags.go:64] FLAG: --node-status-max-images="50"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733752 4787 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733756 4787 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733760 4787 flags.go:64] FLAG: --pod-cidr=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733764 4787 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733772 4787 flags.go:64] FLAG: --pod-manifest-path=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733776 4787 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733780 4787 flags.go:64] FLAG: --pods-per-core="0"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733784 4787 flags.go:64] FLAG: --port="10250"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733789 4787 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733793 4787 flags.go:64] FLAG: --provider-id=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733797 4787 flags.go:64] FLAG: --qos-reserved=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733802 4787 flags.go:64] FLAG: --read-only-port="10255"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733806 4787 flags.go:64] FLAG: --register-node="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733810 4787 flags.go:64] FLAG: --register-schedulable="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733814 4787 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733825 4787 flags.go:64] FLAG: --registry-burst="10"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733829 4787 flags.go:64] FLAG: --registry-qps="5"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733834 4787 flags.go:64] FLAG: --reserved-cpus=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733838 4787 flags.go:64] FLAG: --reserved-memory=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733843 4787 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733848 4787 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733853 4787 flags.go:64] FLAG: --rotate-certificates="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733857 4787 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733861 4787 flags.go:64] FLAG: --runonce="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733865 4787 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733870 4787 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733876 4787 flags.go:64] FLAG: --seccomp-default="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733880 4787 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733884 4787 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733888 4787 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733893 4787 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733897 4787 flags.go:64] FLAG: --storage-driver-password="root"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733902 4787 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733906 4787 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733910 4787 flags.go:64] FLAG: --storage-driver-user="root"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733914 4787 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733919 4787 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733923 4787 flags.go:64] FLAG: --system-cgroups=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733928 4787 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733934 4787 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733944 4787 flags.go:64] FLAG: --tls-cert-file=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733948 4787 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733954 4787 flags.go:64] FLAG: --tls-min-version=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733959 4787 flags.go:64] FLAG: --tls-private-key-file=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733964 4787 flags.go:64] FLAG: --topology-manager-policy="none"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733968 4787 flags.go:64] FLAG: --topology-manager-policy-options=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733973 4787 flags.go:64] FLAG: --topology-manager-scope="container"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733978 4787 flags.go:64] FLAG: --v="2"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733984 4787 flags.go:64] FLAG: --version="false"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733990 4787 flags.go:64] FLAG: --vmodule=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.733996 4787 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.734000 4787 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734112 4787 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734118 4787 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734122 4787 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734126 4787 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734130 4787 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734136 4787 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734140 4787 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734144 4787 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734149 4787 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
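[Editor's note] The flags.go:64 FLAG dump above records the kubelet's entire effective command-line state, one entry per flag (the unrecognized-gate warnings resume immediately around this point). When comparing nodes it helps to have that dump as a mapping; a small Python sketch, again assuming entry-per-line splitting as done here:

import re

FLAG = re.compile(r'flags\.go:\d+\] FLAG: --([\w.-]+)="(.*)"$')

def parse_flag_dump(lines):
    """Map each logged kubelet flag to its string value, e.g.
    result["node-ip"] == "192.168.126.11"."""
    result = {}
    for line in lines:
        m = FLAG.search(line.strip())
        if m:
            result[m.group(1)] = m.group(2)
    return result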
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734153 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734158 4787 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734162 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734165 4787 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734169 4787 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734173 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734176 4787 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734180 4787 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734183 4787 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734187 4787 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734190 4787 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734196 4787 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734199 4787 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734203 4787 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734206 4787 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734209 4787 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734213 4787 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734216 4787 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734220 4787 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734224 4787 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734227 4787 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734231 4787 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734234 4787 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734238 4787 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734241 4787 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734245 4787 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734248 4787 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734251 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734257 4787 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734260 4787 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734264 4787 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734268 4787 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734273 4787 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734277 4787 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734281 4787 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734285 4787 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734288 4787 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734293 4787 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734297 4787 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734302 4787 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734307 4787 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734311 4787 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734316 4787 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734322 4787 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734327 4787 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734331 4787 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734337 4787 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734343 4787 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734347 4787 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734352 4787 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734356 4787 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734361 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734366 4787 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734370 4787 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734375 4787 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734380 4787 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734385 4787 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734390 4787 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734395 4787 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734399 4787 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734408 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.734412 4787 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.734418 4787 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.750127 4787 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.750602 4787 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750763 4787 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750778 4787 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750785 4787 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750791 4787 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750798 4787 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750804 4787 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750810 4787 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750815 4787 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750822 4787 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750831 4787 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750841 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750850 4787 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750857 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750864 4787 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750871 4787 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750878 4787 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750884 4787 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750889 4787 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750895 4787 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750900 4787 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750906 4787 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750911 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750916 4787 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750922 4787 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750928 4787 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750935 4787 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750941 4787 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750948 4787 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750955 4787 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750961 4787 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750968 4787 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750975 4787 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750980 4787 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750986 4787 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.750998 4787 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751003 4787 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751009 4787 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751014 4787 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751019 4787 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751025 4787 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751031 4787 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751036 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751041 4787 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751046 4787 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751052 4787 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751057 4787 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751063 4787 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751069 4787 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751076 4787 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751081 4787 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751086 4787 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751092 4787 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751129 4787 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751136 4787 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751144 4787 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751151 4787 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751158 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751163 4787 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751169 4787 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751176 4787 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751185 4787 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751191 4787 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751197 4787 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751203 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751209 4787 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751215 4787 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751221 4787 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751227 4787 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751232 4787 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751238 4787 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751243 4787 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.751254 4787 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751438 4787 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751448 4787 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751484 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751490 4787 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751495 4787 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751503 4787 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751510 4787 feature_gate.go:330] unrecognized feature gate: Example
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751516 4787 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751523 4787 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751530 4787 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751537 4787 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751544 4787 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751550 4787 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751558 4787 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751565 4787 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751571 4787 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751578 4787 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751584 4787 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751589 4787 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751595 4787 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751601 4787 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751606 4787 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751611 4787 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751616 4787 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751622 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751627 4787 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751632 4787 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751639 4787 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751646 4787 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751652 4787 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751658 4787 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751664 4787 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751670 4787 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751676 4787 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751683 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751689 4787 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751695 4787 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751702 4787 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751709 4787 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751718 4787 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751727 4787 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751734 4787 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751742 4787 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751749 4787 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751758 4787 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751767 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751775 4787 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751781 4787 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751787 4787 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751794 4787 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751802 4787 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751810 4787 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751817 4787 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751823 4787 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751833 4787 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751842 4787 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751851 4787 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751859 4787 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751867 4787 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751873 4787 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751879 4787 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751884 4787 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751889 4787 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751895 4787 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751900 4787 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751905 4787 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751910 4787 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751916 4787 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751921 4787 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751926 4787 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.751931 4787 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.751940 4787 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.752224 4787 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.758579 4787 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.758698 4787 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.761476 4787 server.go:997] "Starting client certificate rotation"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.761528 4787 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.761721 4787 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-29 09:07:07.156839651 +0000 UTC
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.761808 4787 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.785433 4787 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.787171 4787 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.789769 4787 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.802690 4787 log.go:25] "Validated CRI v1 runtime API"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.849915 4787 log.go:25] "Validated CRI v1 image API"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.853261 4787 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.859854 4787 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-29-13-11-22-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.859921 4787 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.893708 4787 manager.go:217] Machine: {Timestamp:2026-01-29 13:16:01.891067072 +0000 UTC m=+0.652327378 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:1406f0c8-950e-4271-841a-c6aa782191ee BootID:b6cb75cf-71fc-45e9-a32b-9486bc86c1ea Filesystems:[{Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:88:21:9c Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:88:21:9c Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:a0:5f:6b Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:ef:0f:88 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:5f:8e:1a Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:a3:55:8b Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:44:4f:0f Speed:-1 Mtu:1496} {Name:eth10 MacAddress:62:88:2e:61:2c:b2 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:0e:50:b1:51:98:c1 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.894034 4787 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.894234 4787 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.894675 4787 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.894906 4787 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.894953 4787 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.895236 4787 topology_manager.go:138] "Creating topology manager with none policy"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.895250 4787 container_manager_linux.go:303] "Creating device plugin manager"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.896083 4787 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.896129 4787 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.897021 4787 state_mem.go:36] "Initialized new in-memory state store"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.897576 4787 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.901278 4787 kubelet.go:418] "Attempting to sync node with API server"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.901309 4787 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.901375 4787 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.901396 4787 kubelet.go:324] "Adding apiserver pod source"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.901411 4787 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.907336 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused
Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.907475 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError"
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.907332 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.907562 4787 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.907623 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.908753 4787 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.913018 4787 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.914945 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.914991 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915034 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915060 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915087 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915102 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915118 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915142 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915160 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915175 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915196 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.915213 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.916116 4787 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.917007 4787 server.go:1280] "Started kubelet"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.918034 4787 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.918285 4787 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.919342 4787 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.919342 4787 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused
Jan 29 13:16:01 crc systemd[1]: Started Kubernetes Kubelet.
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.920193 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.921079 4787 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.921212 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 08:17:51.180397527 +0000 UTC
Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.921665 4787 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.921677 4787 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.921823 4787 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.922113 4787 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.922556 4787 server.go:460] "Adding debug handlers to kubelet server"
Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.923440 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused
Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.923592 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError"
Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.923819 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="200ms"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.928930 4787 factory.go:55] Registering systemd factory
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.929186 4787 factory.go:221] Registration of the systemd container factory successfully
Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.928942 4787 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.203:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188f360242b60a36 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 13:16:01.91695519 +0000 UTC m=+0.678215526,LastTimestamp:2026-01-29 13:16:01.91695519 +0000 UTC m=+0.678215526,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.932684 4787 factory.go:153] Registering CRI-O factory
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.933640 4787 factory.go:221] Registration of the crio container factory successfully
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.933944 4787 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.934166 4787 factory.go:103] Registering Raw factory
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.934339 4787 manager.go:1196] Started watching for new ooms in manager
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.938013 4787 manager.go:319] Starting recovery of all containers
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.941788 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.941863 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.941888 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.941909 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.941928 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.941946 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.941966 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.941985 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942006 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942028 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942047 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942066 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942085 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942108 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942166 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942190 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942211 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942230 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942249 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942270 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942289 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942335 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942355 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942374 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942394 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942414 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942436 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942505 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942537 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942559 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942609 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942628 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942646 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942665 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942683 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942702 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942721 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942741 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942760 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942777 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942795 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942815 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"
volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942834 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942852 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942872 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942891 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942916 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942934 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942952 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942969 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.942988 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943050 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943079 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943100 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943122 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943143 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943162 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943179 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943197 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.943216 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.946798 4787 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.946882 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.946911 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.946929 4787 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.946947 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.946964 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.946979 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947096 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947123 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947144 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947162 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947180 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947196 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947214 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947234 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947252 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947268 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947284 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947299 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947315 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947334 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947352 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947367 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947386 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947431 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947446 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947495 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947513 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947527 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947543 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947562 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947608 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947626 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947639 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947652 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947667 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947683 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947695 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947709 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947725 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947741 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947756 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947769 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947783 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947796 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947819 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947834 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947852 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947867 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947883 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947898 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947913 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947927 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947941 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947955 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947967 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947982 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.947998 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948013 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948027 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948044 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948060 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948074 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948088 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948105 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948119 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948131 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948143 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948153 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948202 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948221 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948236 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948251 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948263 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948276 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948290 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948302 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948314 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948326 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948336 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948345 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948356 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948365 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948374 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948385 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948395 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948405 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948416 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948426 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948441 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948468 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948478 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948488 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948499 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948513 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948526 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948539 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948551 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948562 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948575 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948586 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948595 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948606 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948618 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948629 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948639 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948649 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948659 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948669 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948681 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948692 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948705 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948718 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948732 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948747 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948759 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948775 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948789 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948803 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948815 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948826 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948840 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948850 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948858 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948869 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948880 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948890 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948900 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948911 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948922 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948933 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948942 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948951 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948961 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948969 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948979 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.948991 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949001 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949010 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949019 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949029 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949038 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949047 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949057 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949066 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949077 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949088 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949098 4787 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949108 4787 reconstruct.go:97] "Volume reconstruction finished" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.949117 4787 reconciler.go:26] "Reconciler: start to sync state" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.974385 4787 manager.go:324] Recovery completed Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.980229 4787 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.984365 4787 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.984427 4787 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.984481 4787 kubelet.go:2335] "Starting kubelet main sync loop" Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.984705 4787 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 29 13:16:01 crc kubenswrapper[4787]: W0129 13:16:01.985957 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:01 crc kubenswrapper[4787]: E0129 13:16:01.986048 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.989169 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.995761 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.995810 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.995825 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.998023 4787 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.998050 4787 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 29 13:16:01 crc kubenswrapper[4787]: I0129 13:16:01.998072 4787 state_mem.go:36] "Initialized new in-memory state store" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.016321 4787 policy_none.go:49] "None policy: Start" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.017431 4787 memory_manager.go:170] "Starting memorymanager" 
policy="None" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.017498 4787 state_mem.go:35] "Initializing new in-memory state store" Jan 29 13:16:02 crc kubenswrapper[4787]: E0129 13:16:02.021945 4787 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.076609 4787 manager.go:334] "Starting Device Plugin manager" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.076685 4787 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.076701 4787 server.go:79] "Starting device plugin registration server" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.077286 4787 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.077308 4787 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.077582 4787 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.077674 4787 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.077681 4787 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.084874 4787 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.085012 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.086214 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.086243 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.086261 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.086425 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.087003 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.087040 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.087692 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.087728 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.087748 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.087925 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.088110 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.088139 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.088164 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.088169 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.088185 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.088848 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.088886 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.088899 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.089106 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.089128 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.089136 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.089163 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.089195 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.089553 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.090069 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.090089 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.090340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.090157 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.090414 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.090443 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: E0129 13:16:02.090263 4787 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.090687 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091022 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091059 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091532 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091562 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091572 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091746 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091777 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091816 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091834 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.091841 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.092440 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.092519 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.092535 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: E0129 13:16:02.125250 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="400ms" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151160 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151208 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151232 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151262 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151282 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151302 4787 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151320 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151340 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151360 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151380 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151396 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151413 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151431 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151449 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.151503 4787 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.177625 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.178772 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.178830 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.178848 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.178961 4787 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 13:16:02 crc kubenswrapper[4787]: E0129 13:16:02.179552 4787 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.203:6443: connect: connection refused" node="crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252639 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252723 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252764 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252801 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252835 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252872 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" 
Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252909 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252924 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.252945 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253026 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253046 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253075 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253116 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253135 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253152 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253172 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253189 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253193 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253252 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253298 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253346 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253397 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253447 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253539 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253617 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253661 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" 
(UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253670 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253712 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253681 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.253143 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.380014 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.381993 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.382054 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.382072 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.382109 4787 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 13:16:02 crc kubenswrapper[4787]: E0129 13:16:02.382611 4787 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.203:6443: connect: connection refused" node="crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.433646 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.464837 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.484846 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: W0129 13:16:02.488597 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-3f3cce916fc5ed3faa7e059bdb392f998ac17ef2a719f3441a2244fed1c1ac44 WatchSource:0}: Error finding container 3f3cce916fc5ed3faa7e059bdb392f998ac17ef2a719f3441a2244fed1c1ac44: Status 404 returned error can't find the container with id 3f3cce916fc5ed3faa7e059bdb392f998ac17ef2a719f3441a2244fed1c1ac44 Jan 29 13:16:02 crc kubenswrapper[4787]: W0129 13:16:02.511079 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-e5d8c6eb456a75eeb51ac47484eea686bccb409afb766d766a8193d76d286592 WatchSource:0}: Error finding container e5d8c6eb456a75eeb51ac47484eea686bccb409afb766d766a8193d76d286592: Status 404 returned error can't find the container with id e5d8c6eb456a75eeb51ac47484eea686bccb409afb766d766a8193d76d286592 Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.512180 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.518381 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:02 crc kubenswrapper[4787]: E0129 13:16:02.526491 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="800ms" Jan 29 13:16:02 crc kubenswrapper[4787]: W0129 13:16:02.537431 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-89a5917d99e3512d82211037c924450b32e05ee290fa1cbaf3b3d678dc3e3bb9 WatchSource:0}: Error finding container 89a5917d99e3512d82211037c924450b32e05ee290fa1cbaf3b3d678dc3e3bb9: Status 404 returned error can't find the container with id 89a5917d99e3512d82211037c924450b32e05ee290fa1cbaf3b3d678dc3e3bb9 Jan 29 13:16:02 crc kubenswrapper[4787]: W0129 13:16:02.554075 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-8344b5da2704e5012257e84c288096180fbd4479da9119d7ba0eb852c8d7d651 WatchSource:0}: Error finding container 8344b5da2704e5012257e84c288096180fbd4479da9119d7ba0eb852c8d7d651: Status 404 returned error can't find the container with id 8344b5da2704e5012257e84c288096180fbd4479da9119d7ba0eb852c8d7d651 Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.783230 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.785495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.785571 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.785595 4787 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.785645 4787 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 13:16:02 crc kubenswrapper[4787]: E0129 13:16:02.786424 4787 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.203:6443: connect: connection refused" node="crc" Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.921387 4787 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.921477 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 18:14:29.867455285 +0000 UTC Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.990062 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e5d8c6eb456a75eeb51ac47484eea686bccb409afb766d766a8193d76d286592"} Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.991141 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f6fddfefc88c4810bda684db34b7f545f42b9c69c6f9ca4b02860567c9f111cc"} Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.992246 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3f3cce916fc5ed3faa7e059bdb392f998ac17ef2a719f3441a2244fed1c1ac44"} Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.993521 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8344b5da2704e5012257e84c288096180fbd4479da9119d7ba0eb852c8d7d651"} Jan 29 13:16:02 crc kubenswrapper[4787]: I0129 13:16:02.994714 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"89a5917d99e3512d82211037c924450b32e05ee290fa1cbaf3b3d678dc3e3bb9"} Jan 29 13:16:03 crc kubenswrapper[4787]: W0129 13:16:03.010008 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:03 crc kubenswrapper[4787]: E0129 13:16:03.010152 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:03 crc kubenswrapper[4787]: W0129 13:16:03.027145 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get 
"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:03 crc kubenswrapper[4787]: E0129 13:16:03.027217 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:03 crc kubenswrapper[4787]: W0129 13:16:03.294147 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:03 crc kubenswrapper[4787]: E0129 13:16:03.294236 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:03 crc kubenswrapper[4787]: E0129 13:16:03.327590 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="1.6s" Jan 29 13:16:03 crc kubenswrapper[4787]: W0129 13:16:03.473783 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:03 crc kubenswrapper[4787]: E0129 13:16:03.473955 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:03 crc kubenswrapper[4787]: I0129 13:16:03.587532 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:03 crc kubenswrapper[4787]: I0129 13:16:03.588940 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:03 crc kubenswrapper[4787]: I0129 13:16:03.588984 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:03 crc kubenswrapper[4787]: I0129 13:16:03.588996 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:03 crc kubenswrapper[4787]: I0129 13:16:03.589027 4787 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 13:16:03 crc kubenswrapper[4787]: E0129 13:16:03.589646 4787 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.203:6443: connect: connection refused" node="crc" Jan 29 13:16:03 crc kubenswrapper[4787]: I0129 13:16:03.816701 4787 certificate_manager.go:356] 
kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 13:16:03 crc kubenswrapper[4787]: E0129 13:16:03.818732 4787 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:03 crc kubenswrapper[4787]: I0129 13:16:03.920639 4787 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:03 crc kubenswrapper[4787]: I0129 13:16:03.921584 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 05:57:37.917471904 +0000 UTC Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.000885 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f"} Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.001055 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.002535 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.002568 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.002580 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.009588 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7"} Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.012021 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9"} Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.012416 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.014153 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b"} Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.014265 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.014349 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:04 crc 
kubenswrapper[4787]: I0129 13:16:04.014375 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.014385 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.015426 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.015473 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.015484 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.016365 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"180ad932e889b4e0870b064a0bf0ce373b3f0928a9c4575d4cda14e75d0d949e"} Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.016492 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.017436 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.017495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.017509 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:04 crc kubenswrapper[4787]: W0129 13:16:04.920080 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.920234 4787 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:04 crc kubenswrapper[4787]: E0129 13:16:04.920242 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:04 crc kubenswrapper[4787]: I0129 13:16:04.922655 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 08:55:18.0045938 +0000 UTC Jan 29 13:16:04 crc kubenswrapper[4787]: E0129 13:16:04.928258 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="3.2s" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.022930 4787 generic.go:334] "Generic (PLEG): 
container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9" exitCode=0 Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.023012 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9"} Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.023112 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.024592 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.024657 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.024684 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.027087 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.027749 4787 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b" exitCode=0 Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.027876 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b"} Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.027921 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.029278 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.029330 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.029327 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.029381 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.029404 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.029351 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.031349 4787 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="180ad932e889b4e0870b064a0bf0ce373b3f0928a9c4575d4cda14e75d0d949e" exitCode=0 Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.031409 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"180ad932e889b4e0870b064a0bf0ce373b3f0928a9c4575d4cda14e75d0d949e"} Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.031533 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.033248 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.033288 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.033308 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.034430 4787 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f" exitCode=0 Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.034500 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f"} Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.034569 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.035703 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.035745 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.035760 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.039411 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3"} Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.039515 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d"} Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.039580 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.039541 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51"} Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.040956 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.041062 4787 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.041084 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.190424 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.192792 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.192842 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.192856 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.192896 4787 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 13:16:05 crc kubenswrapper[4787]: E0129 13:16:05.193487 4787 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.203:6443: connect: connection refused" node="crc" Jan 29 13:16:05 crc kubenswrapper[4787]: W0129 13:16:05.729136 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:05 crc kubenswrapper[4787]: E0129 13:16:05.729293 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:05 crc kubenswrapper[4787]: W0129 13:16:05.870439 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:05 crc kubenswrapper[4787]: E0129 13:16:05.870644 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.920936 4787 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:05 crc kubenswrapper[4787]: I0129 13:16:05.923076 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 19:40:48.34218551 +0000 UTC Jan 29 13:16:06 crc kubenswrapper[4787]: W0129 13:16:06.008119 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get 
"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.203:6443: connect: connection refused Jan 29 13:16:06 crc kubenswrapper[4787]: E0129 13:16:06.008210 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.203:6443: connect: connection refused" logger="UnhandledError" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.045274 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.045332 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.045349 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.045380 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.046907 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.046945 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.046960 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.062478 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.062531 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.062551 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.062570 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 
13:16:06.064336 4787 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="df7c5748b3f9097a07242699322e1cbfa803f58fd2b0ad22dae84f2e1c600739" exitCode=0 Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.064410 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"df7c5748b3f9097a07242699322e1cbfa803f58fd2b0ad22dae84f2e1c600739"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.064610 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.065713 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.065754 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.065773 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.068799 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.068902 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.069170 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"254f1230999b735f7a053b9cc896ac55f3da9c272f825f4b6b7bf2966a147dc4"} Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.069651 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.069670 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.069681 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.070490 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.070561 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.070575 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:06 crc kubenswrapper[4787]: I0129 13:16:06.924042 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 10:31:00.273767121 +0000 UTC Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.078370 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97"} Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.078530 4787 kubelet_node_status.go:401] "Setting node annotation 
to enable volume controller attach/detach" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.080070 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.080126 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.080146 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.084473 4787 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="c6bfc28c5c1f32f3b3644aacba4073d26654e13a60e57db776d7190c747bc78c" exitCode=0 Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.084599 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"c6bfc28c5c1f32f3b3644aacba4073d26654e13a60e57db776d7190c747bc78c"} Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.084672 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.084797 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.084856 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.085379 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086526 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086586 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086609 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086820 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086852 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086866 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086818 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086932 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.086953 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 13:16:07.857898 4787 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 13:16:07 crc kubenswrapper[4787]: I0129 
13:16:07.924660 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 20:58:12.351873675 +0000 UTC Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.058020 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.095197 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"20be1b53cfca4976347c3f58061bbbb5b672d728c834ef2998f86fc6f84a4e5c"} Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.095510 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.095776 4787 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.096042 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.097340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.097377 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.097386 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.098320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.098387 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.098408 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.136926 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.394566 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.396040 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.396095 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.396114 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.396153 4787 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 29 13:16:08 crc kubenswrapper[4787]: I0129 13:16:08.925448 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 09:13:42.719660056 +0000 UTC Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.106886 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"002e0c00e95756993be14fdbd21f4bfabba5b0a668683566abb40354bea15d76"} Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.106996 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"68163faf875dbb372c1d558714922a4e3dc848c98e7f4214368b8119a60ea5a8"} Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.107003 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.108735 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.108812 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.108833 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.859196 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.859450 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.861319 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.861411 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.861439 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.869591 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:09 crc kubenswrapper[4787]: I0129 13:16:09.925915 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 17:52:27.607308618 +0000 UTC Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.115123 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f1c2ed7638a5c14f9ae0c4d1a4e23d03c9d506a975929e8a463cae889015cae4"} Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.115219 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.115213 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9cd656fcebf1bc45affce940113619ab798b3df1dc867fd90f882fe30ae592d8"} Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.115917 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.116657 4787 kubelet_node_status.go:401] "Setting 
node annotation to enable volume controller attach/detach" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.116661 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.117223 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.117283 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.117309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.120683 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.120725 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.120738 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.120955 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.121005 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.121038 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:10 crc kubenswrapper[4787]: I0129 13:16:10.926636 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 08:53:44.831298107 +0000 UTC Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.119112 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.119161 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.120789 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.120850 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.120864 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.121486 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.121546 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.121567 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.634476 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.634790 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.636581 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.636650 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.636662 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:11 crc kubenswrapper[4787]: I0129 13:16:11.926940 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 07:35:09.598696148 +0000 UTC Jan 29 13:16:12 crc kubenswrapper[4787]: E0129 13:16:12.090725 4787 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 29 13:16:12 crc kubenswrapper[4787]: I0129 13:16:12.616885 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 29 13:16:12 crc kubenswrapper[4787]: I0129 13:16:12.617148 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:12 crc kubenswrapper[4787]: I0129 13:16:12.618739 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:12 crc kubenswrapper[4787]: I0129 13:16:12.618767 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:12 crc kubenswrapper[4787]: I0129 13:16:12.618778 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:12 crc kubenswrapper[4787]: I0129 13:16:12.928027 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 18:59:31.70866898 +0000 UTC Jan 29 13:16:13 crc kubenswrapper[4787]: I0129 13:16:13.676558 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:13 crc kubenswrapper[4787]: I0129 13:16:13.676739 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:13 crc kubenswrapper[4787]: I0129 13:16:13.678122 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:13 crc kubenswrapper[4787]: I0129 13:16:13.678216 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:13 crc kubenswrapper[4787]: I0129 13:16:13.678237 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:13 crc kubenswrapper[4787]: I0129 13:16:13.680672 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:13 crc kubenswrapper[4787]: I0129 13:16:13.767709 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 
29 13:16:13 crc kubenswrapper[4787]: I0129 13:16:13.929213 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 08:28:19.981959691 +0000 UTC Jan 29 13:16:14 crc kubenswrapper[4787]: I0129 13:16:14.130940 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:14 crc kubenswrapper[4787]: I0129 13:16:14.132269 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:14 crc kubenswrapper[4787]: I0129 13:16:14.132346 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:14 crc kubenswrapper[4787]: I0129 13:16:14.132362 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:14 crc kubenswrapper[4787]: I0129 13:16:14.930001 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 18:15:01.074005542 +0000 UTC Jan 29 13:16:15 crc kubenswrapper[4787]: I0129 13:16:15.133679 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:15 crc kubenswrapper[4787]: I0129 13:16:15.135499 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:15 crc kubenswrapper[4787]: I0129 13:16:15.135562 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:15 crc kubenswrapper[4787]: I0129 13:16:15.135587 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:15 crc kubenswrapper[4787]: I0129 13:16:15.930399 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 23:37:33.275729606 +0000 UTC Jan 29 13:16:16 crc kubenswrapper[4787]: I0129 13:16:16.677442 4787 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 13:16:16 crc kubenswrapper[4787]: I0129 13:16:16.677586 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 13:16:16 crc kubenswrapper[4787]: I0129 13:16:16.921118 4787 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 29 13:16:16 crc kubenswrapper[4787]: I0129 13:16:16.931507 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 17:32:25.493270058 +0000 UTC Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.143583 4787 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.146525 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97" exitCode=255 Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.146621 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97"} Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.146901 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.148216 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.148278 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.148296 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.149290 4787 scope.go:117] "RemoveContainer" containerID="5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97" Jan 29 13:16:17 crc kubenswrapper[4787]: E0129 13:16:17.859983 4787 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 29 13:16:17 crc kubenswrapper[4787]: I0129 13:16:17.932390 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 04:05:25.430741701 +0000 UTC Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.058407 4787 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.058537 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 13:16:18 crc kubenswrapper[4787]: E0129 13:16:18.129974 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="6.4s" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.151403 4787 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.153516 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc"} Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.153663 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.155094 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.155147 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.155160 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:18 crc kubenswrapper[4787]: E0129 13:16:18.397189 4787 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.547813 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.548085 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.549751 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.549817 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.549830 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:18 crc kubenswrapper[4787]: W0129 13:16:18.574747 4787 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.575945 4787 trace.go:236] Trace[1787491692]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 13:16:08.572) (total time: 10003ms): Jan 29 13:16:18 crc kubenswrapper[4787]: Trace[1787491692]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (13:16:18.574) Jan 29 13:16:18 crc kubenswrapper[4787]: Trace[1787491692]: [10.003352855s] [10.003352855s] END Jan 29 13:16:18 crc kubenswrapper[4787]: E0129 13:16:18.576228 4787 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.683505 4787 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.922885 4787 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.923196 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 29 13:16:18 crc kubenswrapper[4787]: I0129 13:16:18.932771 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 04:36:01.677371407 +0000 UTC Jan 29 13:16:19 crc kubenswrapper[4787]: I0129 13:16:19.155938 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:19 crc kubenswrapper[4787]: I0129 13:16:19.157072 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:19 crc kubenswrapper[4787]: I0129 13:16:19.157102 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:19 crc kubenswrapper[4787]: I0129 13:16:19.157112 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:19 crc kubenswrapper[4787]: I0129 13:16:19.175349 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 29 13:16:19 crc kubenswrapper[4787]: I0129 13:16:19.935607 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 21:13:00.306793372 +0000 UTC Jan 29 13:16:20 crc kubenswrapper[4787]: I0129 13:16:20.158790 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:20 crc kubenswrapper[4787]: I0129 13:16:20.159828 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:20 crc kubenswrapper[4787]: I0129 13:16:20.159879 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:20 crc kubenswrapper[4787]: I0129 13:16:20.159890 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:20 crc kubenswrapper[4787]: I0129 13:16:20.936932 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 22:42:12.666614699 +0000 UTC Jan 29 13:16:21 crc kubenswrapper[4787]: I0129 13:16:21.937553 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 16:33:36.715861262 +0000 UTC Jan 29 13:16:22 crc kubenswrapper[4787]: E0129 13:16:22.090901 4787 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: 
node \"crc\" not found" Jan 29 13:16:22 crc kubenswrapper[4787]: I0129 13:16:22.938643 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 06:18:58.439654644 +0000 UTC Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.062047 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.062232 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.062371 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.063800 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.063865 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.063879 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.071301 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.169581 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.170857 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.170906 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.170923 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.889106 4787 trace.go:236] Trace[89254216]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 13:16:10.830) (total time: 13058ms): Jan 29 13:16:23 crc kubenswrapper[4787]: Trace[89254216]: ---"Objects listed" error: 13058ms (13:16:23.888) Jan 29 13:16:23 crc kubenswrapper[4787]: Trace[89254216]: [13.058976522s] [13.058976522s] END Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.889145 4787 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.890596 4787 trace.go:236] Trace[1244022850]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 13:16:10.547) (total time: 13342ms): Jan 29 13:16:23 crc kubenswrapper[4787]: Trace[1244022850]: ---"Objects listed" error: 13342ms (13:16:23.890) Jan 29 13:16:23 crc kubenswrapper[4787]: Trace[1244022850]: [13.342713453s] [13.342713453s] END Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.890645 4787 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.891328 4787 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 
13:16:23.891739 4787 trace.go:236] Trace[48308730]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (29-Jan-2026 13:16:09.840) (total time: 14050ms): Jan 29 13:16:23 crc kubenswrapper[4787]: Trace[48308730]: ---"Objects listed" error: 14050ms (13:16:23.891) Jan 29 13:16:23 crc kubenswrapper[4787]: Trace[48308730]: [14.050842097s] [14.050842097s] END Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.891758 4787 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.914292 4787 apiserver.go:52] "Watching apiserver" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.919252 4787 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.919740 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"] Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.920438 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.920537 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.920680 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:23 crc kubenswrapper[4787]: E0129 13:16:23.920738 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:23 crc kubenswrapper[4787]: E0129 13:16:23.920851 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.921090 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.921769 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.921890 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:23 crc kubenswrapper[4787]: E0129 13:16:23.921979 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.923405 4787 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.925179 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.925179 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.925265 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.926621 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.926879 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.927141 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.927197 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.934999 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.938593 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.938882 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 17:09:14.884263143 +0000 UTC Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.962906 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.980492 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.983667 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.990929 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992288 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992353 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992385 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992414 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992440 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992479 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992504 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 13:16:23 crc kubenswrapper[4787]: 
I0129 13:16:23.992535 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992560 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992588 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992611 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992632 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992655 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992678 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992701 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992725 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992751 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 29 13:16:23 crc 
kubenswrapper[4787]: I0129 13:16:23.992779 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992801 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992827 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992820 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992849 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992821 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992879 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992911 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.992984 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993014 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993024 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993037 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993067 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993091 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993116 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993145 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993172 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993194 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993220 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993225 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993244 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993277 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993300 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993324 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993347 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993376 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993400 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993399 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993428 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993480 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993514 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993539 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993565 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993619 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993646 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993678 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993703 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993729 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993753 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993770 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993780 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993817 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993818 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993884 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993911 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993930 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993939 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993997 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994032 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994063 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994091 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994114 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994118 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994159 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994178 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994188 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994227 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994248 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994270 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994292 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994312 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod 
\"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994332 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994333 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994397 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994401 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994419 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994443 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994484 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994490 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994511 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994517 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.993410 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994536 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994582 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994609 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994633 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994637 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994682 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994710 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994737 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994763 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994790 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994789 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994818 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994844 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994867 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994899 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994928 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994956 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.994988 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995017 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995014 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995043 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995046 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995070 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995098 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995122 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995147 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995171 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995198 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995227 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995254 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: 
\"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995279 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995305 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995328 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995337 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995353 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995378 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995406 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995434 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995501 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995519 4787 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995529 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995559 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995587 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995615 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995622 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995645 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995708 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995664 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995741 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995772 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995834 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995865 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995876 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995893 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995921 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995948 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.995979 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996005 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996035 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996065 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996093 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996088 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996121 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996190 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996300 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996332 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996358 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996382 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996410 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.996433 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.998897 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999045 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod 
\"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999075 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999106 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999132 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999167 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999194 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999219 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999248 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999275 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999301 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999328 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: 
\"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999358 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999385 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999411 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999436 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999495 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999519 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 13:16:23 crc kubenswrapper[4787]: I0129 13:16:23.999557 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999584 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999607 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999632 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: 
\"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999693 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999722 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999749 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999775 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999802 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999835 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999858 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999888 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999912 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999936 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod 
\"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999961 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.999986 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000012 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000041 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000066 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000092 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000120 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000146 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000174 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000201 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000228 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000256 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000284 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000311 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000336 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000361 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000385 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000408 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000433 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000484 4787 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000508 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000531 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000556 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000582 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000610 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000639 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000664 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000688 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000713 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 
13:16:24.000740 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.000814 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001378 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001418 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001450 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001498 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001537 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001568 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001596 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " 
pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001626 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001654 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001687 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001714 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001740 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001769 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001884 4787 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001901 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001919 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001935 4787 
reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001952 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001966 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001981 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.001996 4787 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002067 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002084 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002121 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002559 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002578 4787 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002620 4787 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002637 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002651 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002697 4787 reconciler_common.go:293] "Volume detached for volume \"signing-key\" 
(UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002711 4787 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002747 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002785 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002802 4787 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002818 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002832 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.002846 4787 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.003518 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.012116 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.996104 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.025235 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.996121 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.996309 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.996695 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.997230 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.997894 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.997999 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.998535 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.998603 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:23.998720 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.002990 4787 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.003885 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.004179 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.004512 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.004776 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.005056 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.005300 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.005538 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.006581 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.006793 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.006978 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.007149 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.007386 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.007471 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.007733 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.009197 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.009285 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.009381 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.009398 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.009631 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.009759 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.009856 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.010137 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.010939 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.012324 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.012417 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.012934 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.012950 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.013100 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.013121 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.013380 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.013662 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.013796 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.013997 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.014171 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.014601 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.014665 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.014698 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.014705 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.014897 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.015103 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.015194 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.015238 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.024968 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.025146 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.025372 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.026783 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.027224 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.027738 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.027814 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.028289 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.028555 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.028630 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.028889 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.028704 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.029524 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.029647 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.030222 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.030146 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.030264 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.030349 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.030604 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.030894 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.030994 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.031440 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.032535 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.032641 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.032704 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.032826 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.033139 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.033230 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.033641 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.033777 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.034201 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.034363 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.034473 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.034722 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.035334 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.035401 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.036144 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.036199 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.037587 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.026011 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:16:24.525974995 +0000 UTC m=+23.287235271 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.037860 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.038161 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.038837 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.038892 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.039863 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.040578 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.040630 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-29 13:16:24.537723331 +0000 UTC m=+23.298984137 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.040987 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.041067 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.041144 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.041172 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.038992 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.041631 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.039367 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.042005 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.041948 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.042156 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.042297 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.039339 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.042569 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.042576 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.042633 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.042679 4787 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.045033 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:24.545015405 +0000 UTC m=+23.306275681 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.045331 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.042873 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.040588 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.045354 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.042912 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.043073 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.043073 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.043693 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.043851 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.045534 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.043870 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.044145 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.044183 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.044222 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.044194 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.044902 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.045761 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.046029 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.046130 4787 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.046236 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.046288 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.046440 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.046570 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.046921 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.046996 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.047019 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.047101 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.047740 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.069535 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.069552 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.069594 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.069882 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.069901 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.070053 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.070411 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.070779 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.070916 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.070932 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.070957 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.070974 4787 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.071051 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:24.57102553 +0000 UTC m=+23.332285806 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.071037 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.071545 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.071759 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.072108 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.072139 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.072123 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.072416 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). 
InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.072519 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.072900 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073043 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073125 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073270 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073476 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073335 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073510 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). 
InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073578 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073731 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073817 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.073835 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.074028 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.075307 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.076022 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.084359 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.084612 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.084648 4787 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.085048 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:24.585017442 +0000 UTC m=+23.346277898 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.085114 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.087155 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.092420 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.098084 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104211 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104696 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104787 4787 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104804 4787 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104818 4787 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104831 4787 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104842 4787 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104856 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104871 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104884 4787 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104897 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104911 4787 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104925 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104943 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104955 4787 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104967 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104980 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.104995 4787 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105008 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105022 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105035 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: 
\"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105048 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105061 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105074 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105086 4787 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105099 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105114 4787 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105126 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105139 4787 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105150 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105164 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105176 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105189 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105238 4787 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105251 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105263 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105274 4787 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105287 4787 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105298 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105313 4787 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105327 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105341 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105352 4787 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105364 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105378 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105390 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105410 
4787 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105429 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105441 4787 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105469 4787 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105482 4787 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105496 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105508 4787 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105520 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105532 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105544 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105555 4787 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105567 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105578 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105591 4787 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105603 4787 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105616 4787 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105628 4787 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105642 4787 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105654 4787 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105665 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105682 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105695 4787 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105709 4787 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105721 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105732 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105747 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 29 
13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105758 4787 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105771 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105782 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105794 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105806 4787 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105817 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105831 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105843 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105854 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105866 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105878 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105889 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105911 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc 
kubenswrapper[4787]: I0129 13:16:24.105923 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105935 4787 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105947 4787 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105958 4787 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105970 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105983 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.105994 4787 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106006 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106018 4787 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106031 4787 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106049 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106061 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106072 4787 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 29 
13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106085 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106097 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106109 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106122 4787 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106133 4787 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106145 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106157 4787 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106170 4787 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106182 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106194 4787 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106207 4787 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106220 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106232 4787 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath 
\"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106243 4787 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106255 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106266 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106278 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106289 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106301 4787 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106313 4787 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106325 4787 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106338 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106349 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106366 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106377 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106389 4787 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 29 
13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106400 4787 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106411 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106422 4787 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106434 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106445 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106473 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106484 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106508 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106518 4787 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106531 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106542 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106555 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106566 4787 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" 
DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106579 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106591 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106603 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106615 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106628 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106639 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106651 4787 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106662 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106674 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106685 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106697 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106708 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106719 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc 
kubenswrapper[4787]: I0129 13:16:24.106730 4787 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106742 4787 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106753 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106766 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106778 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106802 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106814 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106825 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106837 4787 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106848 4787 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106859 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106870 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106881 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106893 4787 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106904 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106915 4787 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106926 4787 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106943 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.106955 4787 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.107010 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.107162 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.128140 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.137875 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.145982 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.148528 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.189184 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.203488 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.207709 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.207746 4787 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.207760 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.214594 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.229910 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.243627 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.251118 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.256771 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.267059 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.269443 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.274622 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.282357 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: W0129 13:16:24.285093 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-20360a5fe5d7c7db41c5942b597f3d3f2a133b573fe9d3e61110d42a854189bf WatchSource:0}: Error finding container 20360a5fe5d7c7db41c5942b597f3d3f2a133b573fe9d3e61110d42a854189bf: Status 404 returned error can't find the container with id 20360a5fe5d7c7db41c5942b597f3d3f2a133b573fe9d3e61110d42a854189bf Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.292421 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.296088 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.610080 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.610230 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.610281 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610316 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:16:25.610284452 +0000 UTC m=+24.371544868 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.610378 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610482 4787 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.610506 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610512 4787 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610558 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610582 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:25.610560979 +0000 UTC m=+24.371821255 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610585 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610600 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:25.61059328 +0000 UTC m=+24.371853556 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610608 4787 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610643 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610658 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610662 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:25.610644921 +0000 UTC m=+24.371905347 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610671 4787 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.610739 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:25.610732463 +0000 UTC m=+24.371992739 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.797810 4787 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.799613 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.799652 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.799666 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.799748 4787 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.806806 4787 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.806925 4787 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.808329 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.808386 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.808398 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.808420 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.808431 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:24Z","lastTransitionTime":"2026-01-29T13:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.841708 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.848839 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.848897 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.848911 4787 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.848933 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.848949 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:24Z","lastTransitionTime":"2026-01-29T13:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.862744 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.866425 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.866482 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.866493 4787 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.866519 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.866533 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:24Z","lastTransitionTime":"2026-01-29T13:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.881937 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.885682 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.885717 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.885728 4787 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.885745 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.885757 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:24Z","lastTransitionTime":"2026-01-29T13:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.895515 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.899121 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.899164 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.899175 4787 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.899195 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.899207 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:24Z","lastTransitionTime":"2026-01-29T13:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.908387 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 29 13:16:24 crc kubenswrapper[4787]: E0129 13:16:24.908666 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.911062 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.911104 4787 
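The status-patch payload in the "failed to patch status" entries above is quoted twice: once by Go's %q when the error is rendered into err="...", and once more because the patch document is itself an embedded quoted string. A minimal Python sketch for recovering the patch as JSON from one complete, single-line entry (decode_status_patch is a hypothetical helper, not part of the kubelet; it assumes the err value has not been elided as in the condensed entries above):

    import json
    import re

    def decode_status_patch(entry: str) -> dict:
        # err="..." is a Go %q-quoted string; for this payload JSON string
        # escaping is compatible, so json.loads can strip one quoting level.
        # (re.search returns None on a non-matching line; fine for a sketch.)
        quoted = re.search(r'err="(.*)"$', entry).group(1)
        message = json.loads('"' + quoted + '"')
        # Inside the decoded message the patch document is quoted once more.
        patch = re.search(r'failed to patch status "(.*)" for node', message).group(1)
        return json.loads(json.loads('"' + patch + '"'))

On the full payload logged earlier this yields, for example, patch["status"]["allocatable"]["cpu"] == "11800m" and a Ready condition with reason KubeletNotReady.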
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.911119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.911139 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.911152 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:24Z","lastTransitionTime":"2026-01-29T13:16:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:24 crc kubenswrapper[4787]: I0129 13:16:24.939812 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 13:11:05.181724868 +0000 UTC Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.014250 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.014287 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.014305 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.014320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.014332 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.116801 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.116855 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.116871 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.116896 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.116911 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.177172 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.177520 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.178823 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc" exitCode=255
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.178884 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc"}
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.178933 4787 scope.go:117] "RemoveContainer" containerID="5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97"
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.179540 4787 scope.go:117] "RemoveContainer" containerID="49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc"
Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.179742 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792"
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.181227 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"97c1c1325f52ce5af0876aa81b93a913334f2113e292b11574881d2a7f916f91"}
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.183084 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8"}
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.183155 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7"}
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.183174 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"20360a5fe5d7c7db41c5942b597f3d3f2a133b573fe9d3e61110d42a854189bf"}
Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.185262 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22"}
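The "back-off 10s" in the CrashLoopBackOff entry above is the first step of the kubelet's exponential restart backoff. A rough Python sketch of that policy (restart_delay is a hypothetical helper; the doubling-from-10s-capped-at-5m constants are the commonly documented kubelet behavior, assumed here rather than taken from this log):

    # Sketch of kubelet-style crash-loop restart delays, assuming a
    # 10s base doubled per restart and capped at 5 minutes (300s).
    def restart_delay(restarts: int, base: float = 10.0, cap: float = 300.0) -> float:
        return min(base * 2 ** restarts, cap)

    print([restart_delay(n) for n in range(6)])
    # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0]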
event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.185294 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"90676ed37e1c5d26597a80b50ad230e712be479941611c6948ecd199f646d4e4"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.216762 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.226435 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.226499 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.226510 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.226531 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.226555 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
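The webhook TLS failures here have a different cause than the earlier connection-refused errors: the webhook's serving certificate expired long before the node's clock. Using the two timestamps reported in the x509 error above, the skew works out to just under 158 days:

    from datetime import datetime, timezone

    # Timestamps taken verbatim from the x509 error in the entry above.
    now = datetime(2026, 1, 29, 13, 16, 25, tzinfo=timezone.utc)
    not_after = datetime(2025, 8, 24, 17, 21, 41, tzinfo=timezone.utc)
    print(now - not_after)  # 157 days, 19:54:44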
Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.237628 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.251314 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.262576 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.273444 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.284661 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.297734 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:16Z\\\",\\\"message\\\":\\\"W0129 13:16:06.283492 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0129 13:16:06.283859 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769692566 cert, and key in /tmp/serving-cert-839508209/serving-signer.crt, /tmp/serving-cert-839508209/serving-signer.key\\\\nI0129 13:16:06.488343 1 observer_polling.go:159] Starting file observer\\\\nW0129 13:16:06.490910 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0129 13:16:06.491054 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:06.491671 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-839508209/tls.crt::/tmp/serving-cert-839508209/tls.key\\\\\\\"\\\\nF0129 13:16:16.745704 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.318029 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.329180 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.329228 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.329241 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.329261 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.329276 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.335313 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.346368 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-zdbwv"] Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.346800 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-zdbwv" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.348316 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.350727 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.351483 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.352862 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.364612 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.378688 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.393063 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:16Z\\\",\\\"message\\\":\\\"W0129 13:16:06.283492 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0129 13:16:06.283859 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769692566 cert, and key in /tmp/serving-cert-839508209/serving-signer.crt, /tmp/serving-cert-839508209/serving-signer.key\\\\nI0129 13:16:06.488343 1 observer_polling.go:159] Starting file observer\\\\nW0129 13:16:06.490910 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0129 13:16:06.491054 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:06.491671 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-839508209/tls.crt::/tmp/serving-cert-839508209/tls.key\\\\\\\"\\\\nF0129 13:16:16.745704 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.406694 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.423463 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.432257 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.432299 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.432307 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.432324 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.432335 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.444682 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.460707 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.474190 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.488066 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:16Z\\\",\\\"message\\\":\\\"W0129 13:16:06.283492 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0129 
13:16:06.283859 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769692566 cert, and key in /tmp/serving-cert-839508209/serving-signer.crt, /tmp/serving-cert-839508209/serving-signer.key\\\\nI0129 13:16:06.488343 1 observer_polling.go:159] Starting file observer\\\\nW0129 13:16:06.490910 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0129 13:16:06.491054 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:06.491671 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-839508209/tls.crt::/tmp/serving-cert-839508209/tls.key\\\\\\\"\\\\nF0129 13:16:16.745704 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 
13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.503527 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.519490 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/63554095-2494-4e27-b2a7-d949955722fa-hosts-file\") pod \"node-resolver-zdbwv\" (UID: \"63554095-2494-4e27-b2a7-d949955722fa\") " pod="openshift-dns/node-resolver-zdbwv" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.519546 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bdsk\" (UniqueName: \"kubernetes.io/projected/63554095-2494-4e27-b2a7-d949955722fa-kube-api-access-9bdsk\") pod \"node-resolver-zdbwv\" (UID: \"63554095-2494-4e27-b2a7-d949955722fa\") " pod="openshift-dns/node-resolver-zdbwv" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.524697 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.535371 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.535427 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.535447 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.535501 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.535518 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.539688 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.549111 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.561369 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.589611 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.621096 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.621271 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.621325 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.621363 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:16:27.621317877 +0000 UTC m=+26.382578153 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.621494 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.621538 4787 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.621552 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.621603 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/63554095-2494-4e27-b2a7-d949955722fa-hosts-file\") pod \"node-resolver-zdbwv\" (UID: \"63554095-2494-4e27-b2a7-d949955722fa\") " pod="openshift-dns/node-resolver-zdbwv" Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.621634 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:27.621607505 +0000 UTC m=+26.382867811 (durationBeforeRetry 2s). 
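The nestedpendingoperations records above show the kubelet's per-volume retry discipline: the TearDown of pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 fails because no CSI driver named kubevirt.io.hostpath-provisioner is currently registered with the kubelet (the plugin has not yet re-registered after this restart), so the operation is parked and may not be retried before the logged deadline. A minimal Go sketch of that doubling-backoff bookkeeping follows; the names (expBackoff, initialBackoff, maxBackoff) and constants are illustrative assumptions, not kubelet's actual internals, though a 2s delay after a few consecutive failures matches the "durationBeforeRetry 2s" seen here.

// Illustrative sketch only: per-operation exponential backoff of the kind
// behind "No retries permitted until ... (durationBeforeRetry 2s)".
package main

import (
	"fmt"
	"time"
)

const (
	initialBackoff = 500 * time.Millisecond // assumed starting delay
	maxBackoff     = 2 * time.Minute        // assumed cap
)

type expBackoff struct {
	lastError time.Time     // when the operation last failed
	duration  time.Duration // current delay before the next retry
}

// update records a failure and doubles the delay, up to maxBackoff.
func (b *expBackoff) update(now time.Time) {
	if b.duration == 0 {
		b.duration = initialBackoff
	} else {
		b.duration *= 2
		if b.duration > maxBackoff {
			b.duration = maxBackoff
		}
	}
	b.lastError = now
}

// retryAllowed reports whether the backoff window has elapsed.
func (b *expBackoff) retryAllowed(now time.Time) bool {
	return now.After(b.lastError.Add(b.duration))
}

func main() {
	var b expBackoff
	now := time.Now()
	b.update(now) // a failed mount/unmount attempt
	fmt.Printf("no retries permitted until %s (durationBeforeRetry %s)\n",
		now.Add(b.duration).Format(time.RFC3339), b.duration)
	fmt.Println("retry allowed now?", b.retryAllowed(now))
}

Each further failure doubles the delay up to the cap, which is why repeated failures for the same volume appear in logs like this at roughly 2s, 4s, 8s, ... intervals rather than in a tight loop.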
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.621667 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bdsk\" (UniqueName: \"kubernetes.io/projected/63554095-2494-4e27-b2a7-d949955722fa-kube-api-access-9bdsk\") pod \"node-resolver-zdbwv\" (UID: \"63554095-2494-4e27-b2a7-d949955722fa\") " pod="openshift-dns/node-resolver-zdbwv" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.621692 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/63554095-2494-4e27-b2a7-d949955722fa-hosts-file\") pod \"node-resolver-zdbwv\" (UID: \"63554095-2494-4e27-b2a7-d949955722fa\") " pod="openshift-dns/node-resolver-zdbwv" Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.621841 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.621866 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.621883 4787 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.621897 4787 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.621939 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:27.621927403 +0000 UTC m=+26.383187769 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.622028 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:27.621997555 +0000 UTC m=+26.383257871 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.622040 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.622055 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.622067 4787 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.622099 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:27.622089477 +0000 UTC m=+26.383349833 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.625678 4787 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.638491 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.638538 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.638561 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.638582 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.638597 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.642542 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bdsk\" (UniqueName: \"kubernetes.io/projected/63554095-2494-4e27-b2a7-d949955722fa-kube-api-access-9bdsk\") pod \"node-resolver-zdbwv\" (UID: \"63554095-2494-4e27-b2a7-d949955722fa\") " pod="openshift-dns/node-resolver-zdbwv" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.658713 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-zdbwv" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.725511 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-q79sn"] Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.725996 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.726884 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-j6wn4"] Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.727170 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.730364 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.730446 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.730509 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.730620 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.730665 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.730628 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-fdf9c"] Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.730821 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.731182 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.731362 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.731435 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.731649 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.734677 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 29 13:16:25 crc kubenswrapper[4787]: W0129 13:16:25.739950 4787 reflector.go:561] object-"openshift-multus"/"default-cni-sysctl-allowlist": failed to list *v1.ConfigMap: configmaps "default-cni-sysctl-allowlist" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.740015 4787 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"default-cni-sysctl-allowlist\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.749335 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.754189 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1
d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.754500 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.754552 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.754565 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.754588 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.754603 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.805295 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824250 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824318 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-system-cni-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824345 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqczt\" (UniqueName: \"kubernetes.io/projected/6311862b-6ca2-4dba-85e0-6829dd45c2db-kube-api-access-pqczt\") pod 
\"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824373 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-binary-copy\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824402 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-cni-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824423 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-os-release\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824446 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6311862b-6ca2-4dba-85e0-6829dd45c2db-rootfs\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824491 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-netns\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824517 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-k8s-cni-cncf-io\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824541 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-socket-dir-parent\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824595 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdgvk\" (UniqueName: \"kubernetes.io/projected/d2526766-68ea-4959-a656-b0c68c754890-kube-api-access-gdgvk\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824648 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-cnibin\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824665 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-multus-certs\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824683 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-hostroot\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824711 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d2526766-68ea-4959-a656-b0c68c754890-multus-daemon-config\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824745 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-system-cni-dir\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824765 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-cnibin\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824788 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46wtj\" (UniqueName: \"kubernetes.io/projected/a11db361-58df-40d6-ba72-c59df0ed819c-kube-api-access-46wtj\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824862 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-cni-multus\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824894 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6311862b-6ca2-4dba-85e0-6829dd45c2db-proxy-tls\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824921 4787 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-conf-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.824954 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6311862b-6ca2-4dba-85e0-6829dd45c2db-mcd-auth-proxy-config\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.825028 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-cni-bin\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.825060 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-etc-kubernetes\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.825097 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d2526766-68ea-4959-a656-b0c68c754890-cni-binary-copy\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.825129 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-kubelet\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.825157 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-os-release\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.825177 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.853294 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:16Z\\\",\\\"message\\\":\\\"W0129 13:16:06.283492 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0129 13:16:06.283859 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769692566 cert, and key in /tmp/serving-cert-839508209/serving-signer.crt, /tmp/serving-cert-839508209/serving-signer.key\\\\nI0129 13:16:06.488343 1 observer_polling.go:159] Starting file observer\\\\nW0129 13:16:06.490910 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0129 13:16:06.491054 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:06.491671 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-839508209/tls.crt::/tmp/serving-cert-839508209/tls.key\\\\\\\"\\\\nF0129 13:16:16.745704 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating 
requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 
13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.857434 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.857478 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.857487 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.857502 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.857511 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.905162 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925692 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-cni-multus\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925761 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6311862b-6ca2-4dba-85e0-6829dd45c2db-proxy-tls\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925785 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-cnibin\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925807 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46wtj\" (UniqueName: \"kubernetes.io/projected/a11db361-58df-40d6-ba72-c59df0ed819c-kube-api-access-46wtj\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925832 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-conf-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925853 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6311862b-6ca2-4dba-85e0-6829dd45c2db-mcd-auth-proxy-config\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925872 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-cni-bin\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925899 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-etc-kubernetes\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925929 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d2526766-68ea-4959-a656-b0c68c754890-cni-binary-copy\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925922 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-cnibin\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925975 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-cni-bin\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925951 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-kubelet\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926010 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-kubelet\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926013 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-etc-kubernetes\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.925934 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-conf-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926064 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-os-release\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 
crc kubenswrapper[4787]: I0129 13:16:25.926091 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926120 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-system-cni-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926146 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqczt\" (UniqueName: \"kubernetes.io/projected/6311862b-6ca2-4dba-85e0-6829dd45c2db-kube-api-access-pqczt\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926170 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-binary-copy\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926192 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926243 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-cni-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926265 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-os-release\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926290 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6311862b-6ca2-4dba-85e0-6829dd45c2db-rootfs\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926330 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-k8s-cni-cncf-io\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc 
kubenswrapper[4787]: I0129 13:16:25.926357 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-netns\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926387 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdgvk\" (UniqueName: \"kubernetes.io/projected/d2526766-68ea-4959-a656-b0c68c754890-kube-api-access-gdgvk\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926412 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-socket-dir-parent\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926435 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-cnibin\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926475 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-multus-certs\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926501 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-hostroot\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926521 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d2526766-68ea-4959-a656-b0c68c754890-multus-daemon-config\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926555 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-system-cni-dir\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926637 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-system-cni-dir\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926668 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-var-lib-cni-multus\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926671 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-netns\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926833 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/d2526766-68ea-4959-a656-b0c68c754890-cni-binary-copy\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926894 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-cnibin\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.926991 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6311862b-6ca2-4dba-85e0-6829dd45c2db-mcd-auth-proxy-config\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927028 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-socket-dir-parent\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927028 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-system-cni-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927057 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-hostroot\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927074 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-k8s-cni-cncf-io\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927078 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-binary-copy\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc 
kubenswrapper[4787]: I0129 13:16:25.927076 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-os-release\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927102 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-host-run-multus-certs\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927110 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-os-release\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927075 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6311862b-6ca2-4dba-85e0-6829dd45c2db-rootfs\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927273 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/d2526766-68ea-4959-a656-b0c68c754890-multus-cni-dir\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927667 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/d2526766-68ea-4959-a656-b0c68c754890-multus-daemon-config\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.927706 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a11db361-58df-40d6-ba72-c59df0ed819c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.931246 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6311862b-6ca2-4dba-85e0-6829dd45c2db-proxy-tls\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.940428 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 10:06:53.169437983 +0000 UTC Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.943540 4787 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.945865 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-46wtj\" (UniqueName: \"kubernetes.io/projected/a11db361-58df-40d6-ba72-c59df0ed819c-kube-api-access-46wtj\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.951117 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.955184 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdgvk\" (UniqueName: \"kubernetes.io/projected/d2526766-68ea-4959-a656-b0c68c754890-kube-api-access-gdgvk\") pod \"multus-j6wn4\" (UID: \"d2526766-68ea-4959-a656-b0c68c754890\") " pod="openshift-multus/multus-j6wn4" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.955306 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqczt\" (UniqueName: \"kubernetes.io/projected/6311862b-6ca2-4dba-85e0-6829dd45c2db-kube-api-access-pqczt\") pod \"machine-config-daemon-q79sn\" (UID: \"6311862b-6ca2-4dba-85e0-6829dd45c2db\") " pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.964532 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.964595 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.964628 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.964650 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.964664 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:25Z","lastTransitionTime":"2026-01-29T13:16:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.965765 4787 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.979536 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.985308 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.985397 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.985327 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.985535 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.985703 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:25 crc kubenswrapper[4787]: E0129 13:16:25.985878 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.989546 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.990167 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.991028 4787 csr.go:261] certificate signing request csr-d25bl is approved, waiting to be issued Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.991925 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.992821 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.994118 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 29 13:16:25 crc kubenswrapper[4787]: I0129 13:16:25.994865 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.000516 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.001430 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.003145 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.003657 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.004276 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.004830 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.007236 4787 csr.go:257] certificate signing request csr-d25bl is issued Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.010778 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.011418 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.012374 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.012918 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.013904 4787 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.014509 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.014933 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.015859 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.016470 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.016978 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.019805 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.020330 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.020672 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.021412 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.021925 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.023122 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.023823 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.025387 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.026137 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.027185 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.027678 4787 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.027783 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.029428 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.030595 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.031026 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.032573 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.032629 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.033615 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.034164 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.035158 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.035838 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.036730 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.037329 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.038311 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.038917 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.039776 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.040303 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.041203 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.042041 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.042936 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.043422 4787 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.044234 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.044198 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.044838 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.045409 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.046253 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.049945 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: W0129 13:16:26.058334 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6311862b_6ca2_4dba_85e0_6829dd45c2db.slice/crio-368a74e27c0a55b4ed6385b2432f1c0b22b06ccb7b5f33e4e4f3bf27c32947d7 WatchSource:0}: Error finding container 368a74e27c0a55b4ed6385b2432f1c0b22b06ccb7b5f33e4e4f3bf27c32947d7: Status 404 returned error can't find the container with id 368a74e27c0a55b4ed6385b2432f1c0b22b06ccb7b5f33e4e4f3bf27c32947d7 Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.059471 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-j6wn4" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.066884 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.066962 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.066977 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.067274 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.067304 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.067466 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.097483 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.110130 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-pq2mb"] Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.111031 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.115483 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.117522 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.117568 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.117850 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.117888 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.117958 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 29 13:16:26 crc 
kubenswrapper[4787]: I0129 13:16:26.117979 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.118268 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.132446 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.146334 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.156825 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.167248 4787 
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.170257 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.170362 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.170436 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.170529 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.170591 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.181348 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.188924 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"368a74e27c0a55b4ed6385b2432f1c0b22b06ccb7b5f33e4e4f3bf27c32947d7"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.190081 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j6wn4" event={"ID":"d2526766-68ea-4959-a656-b0c68c754890","Type":"ContainerStarted","Data":"8e0844eab0cde159ac1dfc1fcbffc4377a88a337487d88fe7b45b57476e89189"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.196708 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-zdbwv" 
event={"ID":"63554095-2494-4e27-b2a7-d949955722fa","Type":"ContainerStarted","Data":"c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.196763 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-zdbwv" event={"ID":"63554095-2494-4e27-b2a7-d949955722fa","Type":"ContainerStarted","Data":"b66102b5a42a07e697f3c4af961256bfcc62a825ac797dc25d3658c0a968956a"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.198473 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.201690 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\
\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5d92f4bae947eaf6d58abbc4b5f4f306c944b661d16ce6c34442c742724aba97\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:16Z\\\",\\\"message\\\":\\\"W0129 13:16:06.283492 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0129 13:16:06.283859 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769692566 cert, and key in /tmp/serving-cert-839508209/serving-signer.crt, /tmp/serving-cert-839508209/serving-signer.key\\\\nI0129 13:16:06.488343 1 observer_polling.go:159] Starting file observer\\\\nW0129 13:16:06.490910 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0129 13:16:06.491054 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:06.491671 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-839508209/tls.crt::/tmp/serving-cert-839508209/tls.key\\\\\\\"\\\\nF0129 13:16:16.745704 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:06Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 
13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.208721 4787 scope.go:117] "RemoveContainer" containerID="49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc" Jan 29 13:16:26 crc kubenswrapper[4787]: E0129 13:16:26.208919 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.227610 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229533 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-etc-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229567 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-ovn\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229593 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229633 4787 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-ovn-kubernetes\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229663 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-netd\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229696 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-var-lib-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229714 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-systemd\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229742 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-netns\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229782 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlqsj\" (UniqueName: \"kubernetes.io/projected/55309602-3b5c-4506-8cad-0c1609e2b1cb-kube-api-access-qlqsj\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229802 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-node-log\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229822 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-bin\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.229899 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" 
Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.230001 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-kubelet\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.230040 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-config\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.230144 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-env-overrides\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.230176 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovn-node-metrics-cert\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.230211 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-systemd-units\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.230253 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-slash\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.230281 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-log-socket\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.230308 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-script-lib\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.241575 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.255603 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.270748 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\
\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.272858 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.272887 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.272897 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.272913 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.272925 4787 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.282915 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.298091 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.311074 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.326930 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331339 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-ovn-kubernetes\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331391 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-netd\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331411 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-var-lib-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: 
\"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331428 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-netns\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331443 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-systemd\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331472 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlqsj\" (UniqueName: \"kubernetes.io/projected/55309602-3b5c-4506-8cad-0c1609e2b1cb-kube-api-access-qlqsj\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331502 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-node-log\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331517 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-bin\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331532 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331560 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-kubelet\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331575 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-config\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331611 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-env-overrides\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 
crc kubenswrapper[4787]: I0129 13:16:26.331626 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-systemd-units\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331644 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovn-node-metrics-cert\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331662 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-slash\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331680 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-log-socket\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331699 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-script-lib\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331739 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-etc-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331757 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-ovn\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331774 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331842 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc 
kubenswrapper[4787]: I0129 13:16:26.331879 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-ovn-kubernetes\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331905 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-netd\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331932 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-var-lib-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331954 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-netns\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.331976 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-systemd\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.332488 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-node-log\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.332536 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-bin\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.332565 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.332597 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-kubelet\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.333330 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-config\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.333411 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-log-socket\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.333449 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-systemd-units\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.333522 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-env-overrides\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.333582 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-etc-openvswitch\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.333634 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-ovn\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.333683 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-slash\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.334036 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-script-lib\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.338631 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovn-node-metrics-cert\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.344803 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.354750 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlqsj\" (UniqueName: \"kubernetes.io/projected/55309602-3b5c-4506-8cad-0c1609e2b1cb-kube-api-access-qlqsj\") pod \"ovnkube-node-pq2mb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.361930 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes
/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and 
discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.375310 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.375350 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.375359 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 
13:16:26.375374 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.375384 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.375788 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.386966 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.401012 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.414748 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.431177 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.433866 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: W0129 13:16:26.449643 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55309602_3b5c_4506_8cad_0c1609e2b1cb.slice/crio-ba1d20726317a0f2bc7348ce80e895451b6bfaaa56fd69a9a9939c6428d63ca0 WatchSource:0}: Error finding container ba1d20726317a0f2bc7348ce80e895451b6bfaaa56fd69a9a9939c6428d63ca0: Status 404 returned error can't find the container with id 
ba1d20726317a0f2bc7348ce80e895451b6bfaaa56fd69a9a9939c6428d63ca0 Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.477197 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.477336 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.477397 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.477515 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.477592 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.493125 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.588720 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.589356 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 
13:16:26.589379 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.589415 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.589436 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.692890 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.692930 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.692939 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.692962 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.692973 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.795802 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.796061 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.796132 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.796233 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.796324 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.900427 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.900760 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.900874 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.900974 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.901042 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:26Z","lastTransitionTime":"2026-01-29T13:16:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:26 crc kubenswrapper[4787]: E0129 13:16:26.927329 4787 configmap.go:193] Couldn't get configMap openshift-multus/default-cni-sysctl-allowlist: failed to sync configmap cache: timed out waiting for the condition Jan 29 13:16:26 crc kubenswrapper[4787]: E0129 13:16:26.927525 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-sysctl-allowlist podName:a11db361-58df-40d6-ba72-c59df0ed819c nodeName:}" failed. No retries permitted until 2026-01-29 13:16:27.427487315 +0000 UTC m=+26.188747601 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cni-sysctl-allowlist" (UniqueName: "kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-sysctl-allowlist") pod "multus-additional-cni-plugins-fdf9c" (UID: "a11db361-58df-40d6-ba72-c59df0ed819c") : failed to sync configmap cache: timed out waiting for the condition Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.941610 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 23:09:52.023674718 +0000 UTC Jan 29 13:16:26 crc kubenswrapper[4787]: I0129 13:16:26.992278 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.007919 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.007974 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.007984 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.008006 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.008020 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.008251 4787 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-29 13:11:25 +0000 UTC, rotation deadline is 2026-12-04 19:11:07.304069733 +0000 UTC Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.008287 4787 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7421h54m40.295785773s for next certificate rotation Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.110600 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.110642 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.110652 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.110669 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.110680 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.213476 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.213530 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.213540 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.213559 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.213574 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.221784 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j6wn4" event={"ID":"d2526766-68ea-4959-a656-b0c68c754890","Type":"ContainerStarted","Data":"c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.223313 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0" exitCode=0 Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.223379 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.223409 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"ba1d20726317a0f2bc7348ce80e895451b6bfaaa56fd69a9a9939c6428d63ca0"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.227753 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.227793 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.230027 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.242273 4787 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\
\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b1
7b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.256269 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.270911 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.283315 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-clu
ster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.298927 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.313193 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.316048 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.316075 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.316086 4787 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.316103 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.316114 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.327658 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.339013 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.348112 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.359771 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.372049 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.384000 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.416629 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.418377 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.418410 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.418424 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.418445 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.418470 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.430554 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.442974 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.451058 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.451748 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a11db361-58df-40d6-ba72-c59df0ed819c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fdf9c\" (UID: \"a11db361-58df-40d6-ba72-c59df0ed819c\") " pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.456734 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.471365 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.484188 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.503690 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.519385 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.521154 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.521192 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.521202 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.521221 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.521235 4787 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.533946 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.547721 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.567444 4787 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.567688 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.583757 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.604503 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.624736 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.624783 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.624795 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.624815 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.624828 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.642214 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96
b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.653619 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.653749 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.653785 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.653805 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.653874 4787 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.653901 4787 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:16:31.653855851 +0000 UTC m=+30.415116127 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.653949 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:31.653937513 +0000 UTC m=+30.415197789 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.654064 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654303 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654303 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654341 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654360 4787 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654363 4787 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654320 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654408 4787 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:31.654388084 +0000 UTC m=+30.415648490 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654418 4787 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654436 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:31.654425225 +0000 UTC m=+30.415685721 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.654491 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:31.654479016 +0000 UTC m=+30.415739412 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.658872 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-z5mvv"] Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.659382 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.661471 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.661506 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.661773 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.663591 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.675222 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\
\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.689342 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.703051 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.719299 4787 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.728404 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.728480 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.728503 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.728529 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.728546 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.732525 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.744831 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.755546 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a8e24ae3-482d-462d-8c7f-2dfa9223a866-serviceca\") pod \"node-ca-z5mvv\" (UID: \"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.755648 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a8e24ae3-482d-462d-8c7f-2dfa9223a866-host\") pod \"node-ca-z5mvv\" (UID: \"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.755672 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh69l\" (UniqueName: \"kubernetes.io/projected/a8e24ae3-482d-462d-8c7f-2dfa9223a866-kube-api-access-qh69l\") pod \"node-ca-z5mvv\" (UID: 
\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.757425 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.769927 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.787006 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has 
all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.800362 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.821040 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin
\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.830797 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.830851 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.830862 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.830882 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.830895 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.834444 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.850191 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.856207 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a8e24ae3-482d-462d-8c7f-2dfa9223a866-host\") pod \"node-ca-z5mvv\" (UID: \"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.856243 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh69l\" (UniqueName: \"kubernetes.io/projected/a8e24ae3-482d-462d-8c7f-2dfa9223a866-kube-api-access-qh69l\") pod \"node-ca-z5mvv\" (UID: \"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.856275 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a8e24ae3-482d-462d-8c7f-2dfa9223a866-serviceca\") pod \"node-ca-z5mvv\" (UID: \"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.856359 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a8e24ae3-482d-462d-8c7f-2dfa9223a866-host\") pod \"node-ca-z5mvv\" (UID: \"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.857338 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a8e24ae3-482d-462d-8c7f-2dfa9223a866-serviceca\") pod \"node-ca-z5mvv\" (UID: \"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.870289 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:27Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.883192 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh69l\" (UniqueName: \"kubernetes.io/projected/a8e24ae3-482d-462d-8c7f-2dfa9223a866-kube-api-access-qh69l\") pod \"node-ca-z5mvv\" (UID: \"a8e24ae3-482d-462d-8c7f-2dfa9223a866\") " pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.933532 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.933583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.933597 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.933617 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.933631 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:27Z","lastTransitionTime":"2026-01-29T13:16:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.942915 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 17:10:00.965138582 +0000 UTC Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.985485 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.986099 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.986158 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.986203 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:27 crc kubenswrapper[4787]: I0129 13:16:27.986247 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:27 crc kubenswrapper[4787]: E0129 13:16:27.986295 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.011645 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-z5mvv" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.037660 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.037699 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.037710 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.037728 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.037741 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.140712 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.140759 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.140771 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.140795 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.140809 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.237575 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerStarted","Data":"e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.237649 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerStarted","Data":"71d156fca8cbb48fbb09ea06ea75006cfd29f4f3281b3ac8910844c8eb14a6fc"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.242939 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243032 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243046 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243057 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243068 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243078 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243789 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243826 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243844 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243861 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.243874 4787 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.244602 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-z5mvv" event={"ID":"a8e24ae3-482d-462d-8c7f-2dfa9223a866","Type":"ContainerStarted","Data":"fa958697d4b95cb822ee7d5b75de2d636daf6881cf992938b1abb00d7b685f17"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.255694 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.274501 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.293472 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.308974 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.320491 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.333662 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.345180 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.347340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.347377 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.347389 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.347408 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.347422 4787 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.357262 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.370334 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.382084 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.397267 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.431707 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.450765 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.451058 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.451141 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.451227 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.451309 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.451208 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96
b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.463776 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:28Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.557513 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.558051 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.558065 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.558081 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.558091 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.661013 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.661059 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.661072 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.661093 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.661104 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.764540 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.764587 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.764599 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.764619 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.764632 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.866834 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.866872 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.866882 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.866901 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.866913 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.943547 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 01:23:21.70655367 +0000 UTC Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.970092 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.970141 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.970156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.970178 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:28 crc kubenswrapper[4787]: I0129 13:16:28.970192 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:28Z","lastTransitionTime":"2026-01-29T13:16:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.072766 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.072815 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.072824 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.072841 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.072854 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.175510 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.175553 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.175565 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.175585 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.175598 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.250602 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-z5mvv" event={"ID":"a8e24ae3-482d-462d-8c7f-2dfa9223a866","Type":"ContainerStarted","Data":"9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.252512 4787 generic.go:334] "Generic (PLEG): container finished" podID="a11db361-58df-40d6-ba72-c59df0ed819c" containerID="e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de" exitCode=0 Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.252556 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerDied","Data":"e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.272100 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.278625 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.278792 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.278892 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.279021 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.279126 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.289669 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.307236 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.324638 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.342864 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.357879 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.370064 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.381609 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.383292 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.383344 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.383357 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.383376 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.383391 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.396273 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.409902 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.437649 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z 
is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.453080 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.477071 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.487009 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.487068 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.487086 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.487112 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.487132 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.492054 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.508085 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.524797 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.538641 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.553888 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.568032 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.581252 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.590054 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.590110 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.590124 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.590153 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.590170 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.596242 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri
-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.608577 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"
name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.625784 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.640400 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.660922 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z 
is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.673009 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.686028 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.692668 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.692717 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.692730 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.692751 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.692769 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.701312 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:29Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.797585 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.797634 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.797643 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.797666 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.797678 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.901522 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.901610 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.901631 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.901981 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.902202 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:29Z","lastTransitionTime":"2026-01-29T13:16:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.944439 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 21:27:19.55121698 +0000 UTC Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.985647 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:29 crc kubenswrapper[4787]: E0129 13:16:29.985797 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.985654 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:29 crc kubenswrapper[4787]: I0129 13:16:29.985849 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:29 crc kubenswrapper[4787]: E0129 13:16:29.986174 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:29 crc kubenswrapper[4787]: E0129 13:16:29.986259 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.006227 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.006274 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.006285 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.006309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.006325 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.109718 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.109782 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.109805 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.109837 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.109857 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.213189 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.213299 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.213325 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.213983 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.214273 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.259978 4787 generic.go:334] "Generic (PLEG): container finished" podID="a11db361-58df-40d6-ba72-c59df0ed819c" containerID="00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3" exitCode=0 Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.260430 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerDied","Data":"00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3"} Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.288042 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.314445 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.318610 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.318696 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.318714 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.318739 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 
29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.318766 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.340758 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.368210 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.388265 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.406866 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.421746 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.423301 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.423367 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.423378 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.423471 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.423485 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.434176 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.451913 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.470583 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z 
is after 2025-08-24T17:21:41Z"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.485935 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.501004 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.515737 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.526165 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.526214 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.526230 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.526255 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.526271 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.536249 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:30Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.629714 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.629750 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.629764 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.629789 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.629802 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.732042 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.732110 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.732125 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.732151 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.732170 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.835665 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.835724 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.835737 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.835759 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.835774 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.939012 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.939084 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.939102 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.939133 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.939153 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:30Z","lastTransitionTime":"2026-01-29T13:16:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:30 crc kubenswrapper[4787]: I0129 13:16:30.945360 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 21:48:23.202620488 +0000 UTC
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.041884 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.041917 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.041925 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.041941 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.041952 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.145992 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.146094 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.146114 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.146147 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.146170 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.249619 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.249704 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.249723 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.249752 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.249773 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.265969 4787 generic.go:334] "Generic (PLEG): container finished" podID="a11db361-58df-40d6-ba72-c59df0ed819c" containerID="74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32" exitCode=0
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.266050 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerDied","Data":"74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32"}
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.273365 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"}
Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.288868 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not 
be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.307084 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.324203 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.339032 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.353061 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.353108 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.353119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.353138 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.353151 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.357338 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.375001 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.391966 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.405572 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.422288 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.436235 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.455960 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.456031 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.456046 4787 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.456067 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.456111 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.459958 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z 
is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.476422 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.490269 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.506498 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:31Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.559285 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.559332 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.559343 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.559363 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.559376 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.663263 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.663309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.663322 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.663343 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.663356 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.708342 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.708663 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.708766 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:16:39.70870979 +0000 UTC m=+38.469970146 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.708865 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.708970 4787 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.709094 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709125 4787 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709160 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:39.70909973 +0000 UTC m=+38.470360036 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709233 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:39.709204373 +0000 UTC m=+38.470464809 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709265 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709283 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709297 4787 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.709314 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709330 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:39.709321386 +0000 UTC m=+38.470581652 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709553 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709581 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709603 4787 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.709676 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:39.709654794 +0000 UTC m=+38.470915100 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.762145 4787 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.787208 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.787268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.787285 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.787312 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.787330 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.890357 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.890415 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.890430 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.890474 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.890491 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.946955 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 10:37:43.592551468 +0000 UTC Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.985816 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.985830 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.986064 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.986130 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.986303 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:31 crc kubenswrapper[4787]: E0129 13:16:31.986413 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.993446 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.993522 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.993552 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.993572 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:31 crc kubenswrapper[4787]: I0129 13:16:31.993585 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:31Z","lastTransitionTime":"2026-01-29T13:16:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.011678 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.029390 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.052868 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.068339 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.081164 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.101382 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.101446 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.101496 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.101526 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.101549 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.113394 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.134673 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.157245 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.175895 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.201882 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z 
is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.205948 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.206174 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.206341 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.206568 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.206740 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.218128 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.237596 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.257440 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.272943 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.279713 4787 generic.go:334] "Generic (PLEG): container finished" podID="a11db361-58df-40d6-ba72-c59df0ed819c" containerID="210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201" exitCode=0 Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.279759 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerDied","Data":"210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.295717 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\
",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.311531 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.311569 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.311585 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.311612 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.311628 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.317906 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.338231 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.353555 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.367090 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.378237 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.390137 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.406472 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.414871 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.414920 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.414930 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.414952 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.414964 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.422302 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.444267 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.457149 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.475677 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fa
c117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.526989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.527048 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.527070 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.527096 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.527119 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.530744 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.551717 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-releas
e\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.629425 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.629495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.629508 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.629529 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.629541 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.733918 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.734002 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.734018 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.734042 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.734057 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.837489 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.837581 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.837601 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.837632 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.837653 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.941188 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.941247 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.941261 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.941281 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.941294 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:32Z","lastTransitionTime":"2026-01-29T13:16:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:32 crc kubenswrapper[4787]: I0129 13:16:32.947611 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 19:30:24.009195814 +0000 UTC Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.044698 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.045313 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.045334 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.045361 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.045382 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.148594 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.148648 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.148661 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.148696 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.148708 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.251537 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.251601 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.251614 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.251633 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.251650 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.284324 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerStarted","Data":"1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.293522 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.293837 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.304761 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.321514 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.347744 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z 
is after 2025-08-24T17:21:41Z"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.354522 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.354820 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.354922 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.355029 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.355167 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.355580 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.362828 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[
{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.379610 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faa
f92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.393492 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.414667 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.432430 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.446090 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.458325 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.458391 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.458411 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.458439 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.458487 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.460954 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.474286 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.496986 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.513288 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.528803 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mo
untPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.542260 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc 
kubenswrapper[4787]: I0129 13:16:33.561046 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.561969 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.562007 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.562019 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.562038 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.562049 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.583867 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:
16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.603345 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.624318 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.638977 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.653879 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.665263 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.665320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.665333 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.665355 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.665369 4787 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.669154 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.682285 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.703396 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.732910 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.753873 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.767896 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.767942 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.767959 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.767981 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.768002 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.790176 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkub
e-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.808358 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:33Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.814425 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.815225 4787 scope.go:117] "RemoveContainer" containerID="49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc" Jan 29 13:16:33 crc kubenswrapper[4787]: E0129 13:16:33.815389 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.870766 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 
13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.870819 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.870831 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.870848 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.870861 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.949058 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 19:16:32.883938819 +0000 UTC Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.973661 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.973708 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.973721 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.973745 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.973760 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:33Z","lastTransitionTime":"2026-01-29T13:16:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.985431 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.985507 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:33 crc kubenswrapper[4787]: E0129 13:16:33.985605 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:33 crc kubenswrapper[4787]: I0129 13:16:33.985444 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:33 crc kubenswrapper[4787]: E0129 13:16:33.985769 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:33 crc kubenswrapper[4787]: E0129 13:16:33.985869 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.077427 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.077504 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.077517 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.077545 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.077561 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.180432 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.180537 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.180555 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.180579 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.180592 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.283786 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.283857 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.283876 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.283909 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.283929 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.297728 4787 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.298320 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.329374 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.349746 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.371759 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.386400 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.386470 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.386484 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.386504 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.386519 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.394966 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.410986 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.432254 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.455764 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.480530 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.489769 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.489873 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.489933 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc 
kubenswrapper[4787]: I0129 13:16:34.489971 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.490003 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.504686 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has 
all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.524124 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.540835 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.560351 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.574117 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.587649 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.592575 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.592612 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.592626 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.592648 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.592662 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.607921 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\"
,\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.696407 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.696532 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.696557 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.696588 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.696611 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.800599 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.800668 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.800684 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.800710 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.800739 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.911188 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.911260 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.911280 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.911307 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.911328 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.927958 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.928020 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.928035 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.928065 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.928080 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.949432 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 01:16:04.339498662 +0000 UTC Jan 29 13:16:34 crc kubenswrapper[4787]: E0129 13:16:34.949663 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.954628 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.954695 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.954715 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.954748 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.954767 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:34 crc kubenswrapper[4787]: E0129 13:16:34.974122 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.979703 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.979769 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.979793 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.979827 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:34 crc kubenswrapper[4787]: I0129 13:16:34.979851 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:34Z","lastTransitionTime":"2026-01-29T13:16:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: E0129 13:16:35.003807 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:34Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.009856 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.009942 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.009969 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.010005 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.010032 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: E0129 13:16:35.031704 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:35Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.037624 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.037711 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.037733 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.037772 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.037796 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: E0129 13:16:35.068199 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:35Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:35 crc kubenswrapper[4787]: E0129 13:16:35.068362 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.070249 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.070320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.070340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.070362 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.070381 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.173304 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.173374 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.173389 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.173412 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.173431 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.276755 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.276808 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.276820 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.276841 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.276854 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.301432 4787 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.380178 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.380264 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.380284 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.380315 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.380336 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.484047 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.484124 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.484149 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.484188 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.484216 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.587229 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.587305 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.587331 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.587359 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.587381 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.691014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.691117 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.691147 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.691189 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.691223 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.804690 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.804763 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.804783 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.804813 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.804837 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.907903 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.907967 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.907989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.908015 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.908033 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:35Z","lastTransitionTime":"2026-01-29T13:16:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.950516 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 16:26:26.778711801 +0000 UTC Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.985402 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.985489 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:35 crc kubenswrapper[4787]: E0129 13:16:35.985665 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:35 crc kubenswrapper[4787]: I0129 13:16:35.985816 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:35 crc kubenswrapper[4787]: E0129 13:16:35.985919 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:35 crc kubenswrapper[4787]: E0129 13:16:35.986167 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.011200 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.011267 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.011284 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.011314 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.011336 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.115279 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.115356 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.115375 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.115409 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.115430 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.219119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.219188 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.219211 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.219243 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.219269 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.310950 4787 generic.go:334] "Generic (PLEG): container finished" podID="a11db361-58df-40d6-ba72-c59df0ed819c" containerID="1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b" exitCode=0 Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.311061 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerDied","Data":"1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.311860 4787 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.322358 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.322418 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.322435 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.322494 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.322514 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.342208 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.373682 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.405421 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.423157 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.425903 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.426023 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.426119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.426204 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.426228 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.446370 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.470323 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.492758 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.509979 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.529354 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.529424 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.529445 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.529502 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.529528 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.538532 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.563919 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.594064 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.607306 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.632988 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.633057 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.633077 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.633104 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.633124 4787 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.634752 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.650552 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.737017 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.737096 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.737124 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.737189 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.737222 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.840647 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.841008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.841784 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.841863 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.841887 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.946048 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.946108 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.946128 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.946166 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.946181 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:36Z","lastTransitionTime":"2026-01-29T13:16:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:36 crc kubenswrapper[4787]: I0129 13:16:36.951300 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 07:56:47.714504383 +0000 UTC Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.050119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.050172 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.050184 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.050207 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.050222 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.154521 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.154605 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.154631 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.154667 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.154691 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.258574 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.258649 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.258673 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.258711 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.258738 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.320515 4787 generic.go:334] "Generic (PLEG): container finished" podID="a11db361-58df-40d6-ba72-c59df0ed819c" containerID="41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e" exitCode=0 Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.320621 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerDied","Data":"41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e"} Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.343936 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.362298 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.369757 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.369868 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.369900 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.369950 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.369980 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.387942 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri
-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.411719 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"
name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.435252 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.457435 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.473263 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.473797 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.474049 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.474236 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.474415 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.484137 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.501411 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.524147 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.547545 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.573398 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1
e511896c4cff3b4246ec2d83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.578368 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.578426 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.578450 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.578526 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.578554 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.594392 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.610497 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.629858 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release
\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"
state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.682088 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.682141 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.682156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.682178 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.682190 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.786563 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.786657 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.786675 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.786708 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.786732 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.890260 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.890351 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.890371 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.890402 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.890423 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.951781 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 17:55:36.187012805 +0000 UTC
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.978349 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c"]
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.979185 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.982110 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.982975 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.984829 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:16:37 crc kubenswrapper[4787]: E0129 13:16:37.985006 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.985515 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:16:37 crc kubenswrapper[4787]: E0129 13:16:37.985634 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.985889 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:16:37 crc kubenswrapper[4787]: E0129 13:16:37.986004 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.993906 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.993953 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.993970 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.993993 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:37 crc kubenswrapper[4787]: I0129 13:16:37.994010 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:37Z","lastTransitionTime":"2026-01-29T13:16:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.029316 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:37Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.044587 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.063236 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.079412 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-env-overrides\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.079788 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g848\" (UniqueName: \"kubernetes.io/projected/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-kube-api-access-6g848\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.079975 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.080146 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.083125 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.097865 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.098186 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.098363 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.098613 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.098785 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.104010 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.120838 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.136232 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.149678 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.164841 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.181156 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.181262 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.181323 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-env-overrides\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.181721 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-6g848\" (UniqueName: \"kubernetes.io/projected/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-kube-api-access-6g848\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.182027 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.182197 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-env-overrides\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.185139 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runni
ng\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.195567 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.203401 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.203715 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.203897 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.203989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.204067 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.204355 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6g848\" (UniqueName: \"kubernetes.io/projected/e7c3c3cd-3c8a-4dea-b37b-cff3137613ab-kube-api-access-6g848\") pod \"ovnkube-control-plane-749d76644c-vqw5c\" (UID: \"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.217855 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"
/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"
name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 
2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.235921 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.250498 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.265379 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.284745 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\
\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:38Z is after 2025-08-24T17:21:41Z"
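The repeated webhook failures above all reduce to one fact: the serving certificate behind pod.network-node-identity.openshift.io expired on 2025-08-24, while the node's clock reads 2026-01-29, so every TLS handshake to https://127.0.0.1:9743 fails before a status patch can be delivered. A minimal Go sketch of the validity-window check that yields this class of error (illustrative only; the PEM path is hypothetical, and this is not the kubelet's own code):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path to the webhook's serving certificate.
	pemBytes, err := os.ReadFile("webhook-cert.pem")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// x509 verification rejects a chain when the verification time
	// falls outside [NotBefore, NotAfter], which is exactly the
	// "expired or is not yet valid" condition logged above.
	now := time.Now()
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}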
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.301976 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.307953 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.308010 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.308048 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.308076 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.308097 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:38 crc kubenswrapper[4787]: W0129 13:16:38.323321 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode7c3c3cd_3c8a_4dea_b37b_cff3137613ab.slice/crio-2dac1965495100b45137dd660fb434a35b34d584a67a8e004a0ccfbc0a20052e WatchSource:0}: Error finding container 2dac1965495100b45137dd660fb434a35b34d584a67a8e004a0ccfbc0a20052e: Status 404 returned error can't find the container with id 2dac1965495100b45137dd660fb434a35b34d584a67a8e004a0ccfbc0a20052e
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.411647 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.411698 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.411710 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.411731 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.411745 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.514495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.514546 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.514559 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.514579 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.514592 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.617678 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.617734 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.617743 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.617763 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.617780 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.721386 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.721432 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.721444 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.721482 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.721494 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.824026 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.824091 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.824109 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.824129 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.824145 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.927385 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.927448 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.927479 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.927502 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.927520 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:38Z","lastTransitionTime":"2026-01-29T13:16:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:38 crc kubenswrapper[4787]: I0129 13:16:38.952655 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 10:08:26.036853363 +0000 UTC
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.030953 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.031041 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.031064 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.031095 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.031117 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.134429 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.134559 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.134583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.134618 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.134648 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.238812 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.238862 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.238872 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.238897 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.238911 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.330803 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" event={"ID":"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab","Type":"ContainerStarted","Data":"2dac1965495100b45137dd660fb434a35b34d584a67a8e004a0ccfbc0a20052e"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.342193 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.342246 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.342256 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.342279 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.342293 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.445929 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.445998 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.446026 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.446063 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.446089 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.549606 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.549667 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.549681 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.549706 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.549726 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.652670 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.652721 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.652731 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.652751 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.652765 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.755444 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.755525 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.755538 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.755565 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.755576 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.800589 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.800768 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:16:55.800737364 +0000 UTC m=+54.561997640 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.801035 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.801131 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801223 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801248 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801262 4787 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801303 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:55.801293688 +0000 UTC m=+54.562553964 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.801216 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801400 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801463 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801479 4787 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.801548 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801578 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:55.801552605 +0000 UTC m=+54.562812881 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801658 4787 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801726 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:55.801715329 +0000 UTC m=+54.562975785 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801665 4787 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.801760 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:55.80175294 +0000 UTC m=+54.563013216 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
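Each failed volume operation above is parked by nestedpendingoperations.go with an exponentially growing durationBeforeRetry; a 16s delay is what a schedule that roughly doubles on every failed attempt from a sub-second base reaches after a handful of failures. A sketch of that schedule (the 500ms base and the cap are assumptions for illustration, not values read from this log):

package main

import (
	"fmt"
	"time"
)

// backoff returns the wait before the next retry: the delay doubles
// on each recorded failure and is clamped at max. Base and cap here
// are assumed values, not taken from the kubelet source or this log.
func backoff(base, max time.Duration, failures int) time.Duration {
	d := base
	for i := 0; i < failures; i++ {
		d *= 2
		if d > max {
			return max
		}
	}
	return d
}

func main() {
	// 500ms doubled five times reaches 16s, matching the
	// "(durationBeforeRetry 16s)" entries after repeated failures.
	for f := 0; f <= 6; f++ {
		fmt.Printf("failures=%d -> next retry in %s\n",
			f, backoff(500*time.Millisecond, 2*time.Minute, f))
	}
}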
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.859328 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.859376 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.859389 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.859408 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.859418 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.953612 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 18:42:22.844596873 +0000 UTC
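Note that certificate_manager.go computed a different rotation deadline here (2025-12-17) than it did a second earlier (2025-11-08) for the same 2026-02-24 expiry: the certificate manager re-draws a jittered deadline within the certificate's lifetime on each pass. A sketch of that idea, assuming a 70-90% band of the lifetime and a hypothetical issue date (both values are assumptions for illustration, not read from this log or from client-go):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point inside an assumed 70-90% band
// of the certificate's lifetime, so repeated evaluations can yield
// different deadlines for the same NotBefore/NotAfter pair.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	lifetime := notAfter.Sub(notBefore)
	frac := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(float64(lifetime) * frac))
}

func main() {
	// Hypothetical issue date; expiry matches the log. With a one-year
	// lifetime, the 70-90% band spans roughly Nov 2025 to Jan 2026,
	// which brackets both deadlines logged above.
	notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC)
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	for i := 0; i < 2; i++ {
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
	}
}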
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.962791 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.962864 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.962877 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.962899 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.962935 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:39Z","lastTransitionTime":"2026-01-29T13:16:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.985577 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.985616 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:16:39 crc kubenswrapper[4787]: I0129 13:16:39.985589 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.985734 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.985845 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:16:39 crc kubenswrapper[4787]: E0129 13:16:39.985953 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.086954 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.087237 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.087600 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.087781 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.087946 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.190891 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.190921 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.190930 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.190948 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.190959 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.260888 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-gkrsx"]
Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.261824 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:16:40 crc kubenswrapper[4787]: E0129 13:16:40.261982 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.283824 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.293680 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.293728 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.293741 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.293764 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.293780 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network 
plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.305005 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.306645 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.306724 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5dq8\" (UniqueName: \"kubernetes.io/projected/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-kube-api-access-j5dq8\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.331088 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1
e511896c4cff3b4246ec2d83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.336990 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" event={"ID":"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab","Type":"ContainerStarted","Data":"9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.337090 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" event={"ID":"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab","Type":"ContainerStarted","Data":"8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.341679 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" event={"ID":"a11db361-58df-40d6-ba72-c59df0ed819c","Type":"ContainerStarted","Data":"a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.343716 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/0.log" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.346744 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83" exitCode=1 Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.346818 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" 
event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.347722 4787 scope.go:117] "RemoveContainer" containerID="e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.349697 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.365606 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.380338 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.393826 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.395927 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.395959 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.395971 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.395989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.396000 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.409495 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.409707 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5dq8\" (UniqueName: \"kubernetes.io/projected/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-kube-api-access-j5dq8\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:40 crc kubenswrapper[4787]: E0129 13:16:40.410345 4787 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:40 crc kubenswrapper[4787]: E0129 13:16:40.410473 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs podName:0fcadf59-74fc-4aeb-abd6-55f6061fa5b0 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:40.910425329 +0000 UTC m=+39.671685805 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs") pod "network-metrics-daemon-gkrsx" (UID: "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.410954 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6e
d50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.424381 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.431757 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-j5dq8\" (UniqueName: \"kubernetes.io/projected/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-kube-api-access-j5dq8\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.441319 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.457010 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.477385 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.498524 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.498551 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.498570 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.498592 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.498606 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.500944 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.514978 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.531043 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.541910 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.555386 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.568865 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.585196 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.601662 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.602039 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.602191 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.602298 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.602408 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.605337 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.624185 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.643379 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.665729 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.683418 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.702961 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.704501 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.704531 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.704542 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.704564 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.704576 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.717639 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.746167 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\" 6045 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 13:16:40.266908 6045 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 13:16:40.266986 6045 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 13:16:40.267017 6045 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 13:16:40.267099 6045 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 13:16:40.267122 6045 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 13:16:40.267155 6045 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 13:16:40.267158 6045 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 13:16:40.267178 6045 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 13:16:40.267187 6045 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 13:16:40.267191 6045 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 13:16:40.267198 6045 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 13:16:40.267201 6045 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 13:16:40.267215 6045 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 13:16:40.267268 6045 factory.go:656] Stopping watch factory\\\\nI0129 13:16:40.267295 6045 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.764128 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4b
a8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.778164 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.795614 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 
13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.807528 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.807578 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.807589 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.807611 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.807625 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.811119 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\
\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.825855 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:40Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.911041 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.911103 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.911119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.911143 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.911156 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:40Z","lastTransitionTime":"2026-01-29T13:16:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.916549 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:40 crc kubenswrapper[4787]: E0129 13:16:40.916704 4787 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:40 crc kubenswrapper[4787]: E0129 13:16:40.916779 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs podName:0fcadf59-74fc-4aeb-abd6-55f6061fa5b0 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:41.916760183 +0000 UTC m=+40.678020449 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs") pod "network-metrics-daemon-gkrsx" (UID: "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:40 crc kubenswrapper[4787]: I0129 13:16:40.955185 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 09:33:11.97908544 +0000 UTC Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.013886 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.013931 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.013939 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.013956 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.013967 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.116505 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.116574 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.116593 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.116625 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.116647 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.219956 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.220032 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.220048 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.220074 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.220090 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.322935 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.323019 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.323050 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.323077 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.323094 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.351513 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/1.log" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.352379 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/0.log" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.356279 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b" exitCode=1 Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.356365 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.356479 4787 scope.go:117] "RemoveContainer" containerID="e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.358640 4787 scope.go:117] "RemoveContainer" containerID="6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b" Jan 29 13:16:41 crc kubenswrapper[4787]: E0129 13:16:41.358853 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.376833 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.392795 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.412131 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc27
6e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.428509 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.431892 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.431931 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.431950 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.431971 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.431987 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.451862 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.471580 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.488533 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.506486 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
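Every failed status patch in this stretch dies on the same TLS error: the kubelet's Post to the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 is rejected because the serving certificate's notAfter (2025-08-24T17:21:41Z) is long past the node clock (2026-01-29T13:16:41Z). A minimal Go diagnostic, assuming the webhook port is reachable from the node, that dials the same endpoint and prints the presented certificate's validity window:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// InsecureSkipVerify lets the handshake complete so an already-expired
	// certificate can be inspected; the kubelet verifies normally and fails.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial webhook: %v", err)
	}
	defer conn.Close()

	// The first peer certificate is the webhook's serving (leaf) certificate.
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	fmt.Printf("expired:   %v\n", time.Now().After(cert.NotAfter))
}
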
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.526095 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.535771 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.535803 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.535812 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.535827 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.535835 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.554402 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\" 6045 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 13:16:40.266908 6045 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 13:16:40.266986 6045 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 13:16:40.267017 6045 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 13:16:40.267099 6045 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 13:16:40.267122 6045 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 13:16:40.267155 6045 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 13:16:40.267158 6045 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 13:16:40.267178 6045 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 13:16:40.267187 6045 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 13:16:40.267191 6045 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 13:16:40.267198 6045 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 13:16:40.267201 6045 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 13:16:40.267215 6045 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 13:16:40.267268 6045 factory.go:656] Stopping watch factory\\\\nI0129 13:16:40.267295 6045 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.565637 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"19
2.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.583984 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18
fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.599825 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.614130 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 
13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.631823 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.638137 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 
13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.638191 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.638205 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.638223 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.638237 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.645998 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:41Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.741469 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.741504 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.741513 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.741528 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.741539 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
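Note the shape of the payloads in these failed patches: they are strategic merge patches, where the $setElementOrder/conditions directive pins the ordering of the conditions list and the entries that follow are merged into it by the list's merge key, type. A minimal Go sketch that assembles a patch of the same shape (field values are illustrative, with the uid taken from the network-metrics-daemon-gkrsx record above):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Strategic merge patch for a pod's status, mirroring the logged payloads:
	// $setElementOrder fixes list order; "conditions" entries merge by "type".
	patch := map[string]any{
		"metadata": map[string]any{"uid": "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"},
		"status": map[string]any{
			"$setElementOrder/conditions": []map[string]string{
				{"type": "PodReadyToStartContainers"},
				{"type": "Initialized"},
				{"type": "Ready"},
				{"type": "ContainersReady"},
				{"type": "PodScheduled"},
			},
			"conditions": []map[string]any{
				{"type": "Ready", "status": "False", "reason": "ContainersNotReady"},
			},
		},
	}
	out, err := json.MarshalIndent(patch, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
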
Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.844387 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.844427 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.844437 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.844467 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.844479 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.929844 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:41 crc kubenswrapper[4787]: E0129 13:16:41.930087 4787 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:41 crc kubenswrapper[4787]: E0129 13:16:41.930274 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs podName:0fcadf59-74fc-4aeb-abd6-55f6061fa5b0 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:43.93025511 +0000 UTC m=+42.691515396 (durationBeforeRetry 2s). 
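The nestedpendingoperations line above is the volume manager's retry gate: after metrics-certs fails to mount, no retry is permitted until 13:16:43, with durationBeforeRetry 2s, and the delay grows on repeated failures. A sketch of that bookkeeping in Go; the 2s starting value is taken from the log, while the doubling factor and roughly two-minute cap are assumptions for illustration:

package main

import (
	"fmt"
	"time"
)

// backoff mimics the per-volume retry state behind
// "No retries permitted until ... (durationBeforeRetry 2s)".
type backoff struct {
	delay time.Duration
	max   time.Duration
}

func (b *backoff) next(now time.Time) time.Time {
	retryAt := now.Add(b.delay)
	b.delay *= 2 // assumed growth factor; capped below
	if b.delay > b.max {
		b.delay = b.max
	}
	return retryAt
}

func main() {
	b := &backoff{delay: 2 * time.Second, max: 2 * time.Minute}
	failedAt := time.Date(2026, 1, 29, 13, 16, 41, 930255110, time.UTC)
	for attempt := 1; attempt <= 4; attempt++ {
		retryAt := b.next(failedAt)
		fmt.Printf("attempt %d: no retries permitted until %s\n",
			attempt, retryAt.Format(time.RFC3339Nano))
		failedAt = retryAt
	}
}
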
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs") pod "network-metrics-daemon-gkrsx" (UID: "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.947180 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.947267 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.947300 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.947339 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.947366 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:41Z","lastTransitionTime":"2026-01-29T13:16:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.955953 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 12:42:27.177459764 +0000 UTC Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.985665 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:41 crc kubenswrapper[4787]: E0129 13:16:41.985820 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.985843 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:41 crc kubenswrapper[4787]: E0129 13:16:41.985905 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.985954 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:41 crc kubenswrapper[4787]: E0129 13:16:41.985995 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:41 crc kubenswrapper[4787]: I0129 13:16:41.986046 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:41 crc kubenswrapper[4787]: E0129 13:16:41.986091 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.002549 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current 
time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.019115 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.032893 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.044737 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.050349 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.050406 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.050419 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.050439 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.050476 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.059051 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.074315 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.088331 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.114496 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1
182dd3e0a6174e56d85df32b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\" 6045 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 13:16:40.266908 6045 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 13:16:40.266986 6045 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 13:16:40.267017 6045 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 13:16:40.267099 6045 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 13:16:40.267122 6045 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 13:16:40.267155 6045 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 13:16:40.267158 6045 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 13:16:40.267178 6045 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 13:16:40.267187 6045 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 13:16:40.267191 6045 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 13:16:40.267198 6045 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 13:16:40.267201 6045 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 13:16:40.267215 6045 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 13:16:40.267268 6045 factory.go:656] Stopping watch factory\\\\nI0129 13:16:40.267295 6045 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared 
informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hos
tIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.127555 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.148814 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.153097 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.153148 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.153162 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.153185 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.153200 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.166002 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.185699 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 
13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.199043 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.210230 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.223982 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\
\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748
951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.236613 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.256247 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.256312 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.256329 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.256356 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.256371 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.360085 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.360144 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.360162 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.360201 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.360225 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.364329 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/1.log" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.464108 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.464152 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.464162 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.464180 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.464191 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.567521 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.567571 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.567584 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.567605 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.567619 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.671099 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.671405 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.671529 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.671649 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.671691 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.775358 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.775411 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.775428 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.775480 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.775494 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.879190 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.879261 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.879278 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.879307 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.879325 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.956786 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 21:58:29.802789423 +0000 UTC Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.982791 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.982839 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.982853 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.982876 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:42 crc kubenswrapper[4787]: I0129 13:16:42.982891 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:42Z","lastTransitionTime":"2026-01-29T13:16:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.086810 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.086872 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.086883 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.086906 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.086919 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.189987 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.190056 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.190075 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.190104 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.190125 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.292887 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.292967 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.292996 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.293030 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.293058 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.395612 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.395712 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.395741 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.395778 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.395802 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.500057 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.500143 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.500171 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.500215 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.500245 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.602638 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.602676 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.602688 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.602704 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.602714 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.706717 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.706944 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.707076 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.707266 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.707399 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.811128 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.811556 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.811678 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.811795 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.811983 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.915833 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.915889 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.915900 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.916060 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.916093 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:43Z","lastTransitionTime":"2026-01-29T13:16:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.953857 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:43 crc kubenswrapper[4787]: E0129 13:16:43.954113 4787 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:43 crc kubenswrapper[4787]: E0129 13:16:43.954220 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs podName:0fcadf59-74fc-4aeb-abd6-55f6061fa5b0 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:47.954196856 +0000 UTC m=+46.715457142 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs") pod "network-metrics-daemon-gkrsx" (UID: "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.957595 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 05:36:58.588852145 +0000 UTC Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.984868 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:43 crc kubenswrapper[4787]: E0129 13:16:43.985034 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.985596 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:43 crc kubenswrapper[4787]: E0129 13:16:43.985680 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.985832 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:43 crc kubenswrapper[4787]: E0129 13:16:43.985996 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:43 crc kubenswrapper[4787]: I0129 13:16:43.986068 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:43 crc kubenswrapper[4787]: E0129 13:16:43.986149 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.018814 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.018861 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.018873 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.018894 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.018910 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.122165 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.122253 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.122278 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.122314 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.122338 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.225052 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.225100 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.225112 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.225129 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.225138 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.329035 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.329101 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.329119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.329146 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.329166 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.433264 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.433561 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.433593 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.433629 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.433654 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.537193 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.537663 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.537808 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.537946 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.538069 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.642014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.642571 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.642756 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.642898 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.643085 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.746213 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.746268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.746285 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.746309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.746338 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.849242 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.849316 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.849341 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.849377 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.849401 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.952242 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.952294 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.952307 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.952333 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.952351 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:44Z","lastTransitionTime":"2026-01-29T13:16:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.957932 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 18:47:24.509600482 +0000 UTC Jan 29 13:16:44 crc kubenswrapper[4787]: I0129 13:16:44.986575 4787 scope.go:117] "RemoveContainer" containerID="49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.055880 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.055958 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.055973 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.055992 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.056008 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.085622 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.085664 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.085674 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.085691 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.085701 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.099200 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 
2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.103163 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.103205 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.103214 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.103234 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.103246 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.120029 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 
2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.124743 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.124791 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.124801 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.124821 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.124840 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.137230 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 
2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.146898 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.146939 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.146948 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.146966 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.146978 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.161238 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 
2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.165790 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.165850 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.165870 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.165893 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.165906 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.178360 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 
2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.178723 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.181142 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.181233 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.181307 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.181380 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.181441 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.284771 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.284818 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.284833 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.284850 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.284861 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.389981 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.390010 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.390020 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.390036 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.390046 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.392293 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.394352 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.394785 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.408990 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.420915 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.431410 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.444633 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\
\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748
951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.455581 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.469380 4787 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.482485 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.492875 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.492906 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.492915 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.492932 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.492942 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.498860 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.512502 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.521986 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.536793 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.552039 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mo
untPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.565486 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.579264 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.596796 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.597095 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.597200 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.597297 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.597403 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.609164 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\" 6045 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 13:16:40.266908 6045 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 13:16:40.266986 6045 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 13:16:40.267017 6045 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 13:16:40.267099 6045 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 13:16:40.267122 6045 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 13:16:40.267155 6045 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 13:16:40.267158 6045 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 13:16:40.267178 6045 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 13:16:40.267187 6045 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 13:16:40.267191 6045 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 13:16:40.267198 6045 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 13:16:40.267201 6045 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 13:16:40.267215 6045 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 13:16:40.267268 6045 factory.go:656] Stopping watch factory\\\\nI0129 13:16:40.267295 6045 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.622636 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"19
2.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:45Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.700389 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.700442 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.700618 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.700638 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.700649 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.804300 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.804678 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.804753 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.804844 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.804938 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.907880 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.907923 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.907933 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.907949 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.907961 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:45Z","lastTransitionTime":"2026-01-29T13:16:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.958554 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 20:27:11.463187606 +0000 UTC Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.988629 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.988805 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.988899 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.988967 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.989024 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.989085 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:45 crc kubenswrapper[4787]: I0129 13:16:45.989130 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:45 crc kubenswrapper[4787]: E0129 13:16:45.989186 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.011697 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.012043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.012121 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.012439 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.012559 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.116781 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.116853 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.116871 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.116897 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.116912 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.219372 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.219438 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.219548 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.219579 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.219595 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.323134 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.323595 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.323698 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.323814 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.323905 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.427047 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.427114 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.427135 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.427163 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.427179 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.530023 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.530073 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.530084 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.530108 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.530122 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.633225 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.633283 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.633300 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.633329 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.633345 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.736374 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.736766 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.736890 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.736926 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.736944 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.840353 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.840418 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.840431 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.840477 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.840496 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.943320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.943387 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.943402 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.943425 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.943439 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:46Z","lastTransitionTime":"2026-01-29T13:16:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 29 13:16:46 crc kubenswrapper[4787]: I0129 13:16:46.959023 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 21:59:49.706612733 +0000 UTC
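Each certificate_manager.go:356 line in this log reports the same expiration but a different rotation deadline. That matches the upstream client-go certificate manager, which re-jitters the deadline on every pass to a point uniformly inside roughly the 70-90% span of the certificate's lifetime; a self-contained sketch of that rule (the 90-day notBefore below is an assumption, not taken from the log):

    // Sketch of the re-jittered rotation deadline, assuming the upstream
    // client-go rule: deadline = notBefore + U(0.7, 0.9) * lifetime.
    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // nextRotationDeadline picks a fresh jittered deadline each call, which
    // is why consecutive log lines show different deadlines for one cert.
    func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := float64(notAfter.Sub(notBefore))
    	jittered := time.Duration((0.7 + 0.2*rand.Float64()) * total)
    	return notBefore.Add(jittered)
    }

    func main() {
    	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiry from the log
    	notBefore := notAfter.AddDate(0, 0, -90)                  // assumed lifetime, not from the log
    	for i := 0; i < 4; i++ {
    		fmt.Println("rotation deadline is", nextRotationDeadline(notBefore, notAfter))
    	}
    }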
Jan 29 13:16:47 crc kubenswrapper[4787]: I0129 13:16:47.959944 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 12:18:15.141800165 +0000 UTC
Jan 29 13:16:47 crc kubenswrapper[4787]: I0129 13:16:47.985841 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:16:47 crc kubenswrapper[4787]: I0129 13:16:47.985935 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:16:47 crc kubenswrapper[4787]: I0129 13:16:47.985873 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:16:47 crc kubenswrapper[4787]: I0129 13:16:47.985841 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:16:47 crc kubenswrapper[4787]: E0129 13:16:47.986107 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:16:47 crc kubenswrapper[4787]: E0129 13:16:47.986322 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:16:47 crc kubenswrapper[4787]: E0129 13:16:47.986427 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:16:47 crc kubenswrapper[4787]: E0129 13:16:47.986707 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:16:48 crc kubenswrapper[4787]: I0129 13:16:48.018032 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:16:48 crc kubenswrapper[4787]: E0129 13:16:48.018351 4787 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 29 13:16:48 crc kubenswrapper[4787]: E0129 13:16:48.018441 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs podName:0fcadf59-74fc-4aeb-abd6-55f6061fa5b0 nodeName:}" failed. No retries permitted until 2026-01-29 13:16:56.018418552 +0000 UTC m=+54.779678868 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs") pod "network-metrics-daemon-gkrsx" (UID: "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0") : object "openshift-multus"/"metrics-daemon-secret" not registered
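The "(durationBeforeRetry 8s)" in the nestedpendingoperations.go:348 entry is exponential backoff on failed volume operations: the in-tree defaults are believed to be a 500 ms initial delay doubling per failure up to a cap of about two minutes, so an 8 s delay suggests several prior failures of this mount. A minimal sketch under those assumed constants:

    // Sketch of per-operation exponential backoff; the initial delay and cap
    // below are assumed defaults, not values read from this log.
    package main

    import (
    	"fmt"
    	"time"
    )

    const (
    	initialDurationBeforeRetry = 500 * time.Millisecond        // assumption
    	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second // assumption
    )

    // nextBackoff doubles the previous delay, clamped to the maximum.
    func nextBackoff(prev time.Duration) time.Duration {
    	if prev == 0 {
    		return initialDurationBeforeRetry
    	}
    	if next := 2 * prev; next < maxDurationBeforeRetry {
    		return next
    	}
    	return maxDurationBeforeRetry
    }

    func main() {
    	var d time.Duration
    	for i := 0; i < 6; i++ {
    		d = nextBackoff(d)
    		fmt.Printf("failure %d -> durationBeforeRetry %v\n", i+1, d) // 500ms, 1s, 2s, 4s, 8s, 16s
    	}
    }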
Jan 29 13:16:48 crc kubenswrapper[4787]: I0129 13:16:48.960526 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 21:28:47.398997868 +0000 UTC
Jan 29 13:16:49 crc kubenswrapper[4787]: I0129 13:16:49.960686 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 00:35:38.060370662 +0000 UTC
Jan 29 13:16:49 crc kubenswrapper[4787]: I0129 13:16:49.985465 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:16:49 crc kubenswrapper[4787]: I0129 13:16:49.985540 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:16:49 crc kubenswrapper[4787]: I0129 13:16:49.985591 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:16:49 crc kubenswrapper[4787]: E0129 13:16:49.985620 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:16:49 crc kubenswrapper[4787]: E0129 13:16:49.985757 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:16:49 crc kubenswrapper[4787]: E0129 13:16:49.985824 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:16:49 crc kubenswrapper[4787]: I0129 13:16:49.986237 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:16:49 crc kubenswrapper[4787]: E0129 13:16:49.986414 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:16:50 crc kubenswrapper[4787]: I0129 13:16:50.961724 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 17:57:30.070547409 +0000 UTC
Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.905674 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.905754 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.905769 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.905797 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.905817 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:51Z","lastTransitionTime":"2026-01-29T13:16:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.962150 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 13:07:56.612746542 +0000 UTC Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.984852 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.984961 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.984877 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:51 crc kubenswrapper[4787]: E0129 13:16:51.985059 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:51 crc kubenswrapper[4787]: I0129 13:16:51.984958 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:51 crc kubenswrapper[4787]: E0129 13:16:51.985157 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:51 crc kubenswrapper[4787]: E0129 13:16:51.985246 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:51 crc kubenswrapper[4787]: E0129 13:16:51.985395 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.003138 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.008756 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.008827 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.008842 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.008861 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.008873 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.021741 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.037083 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.053224 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.071769 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.086559 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc27
6e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.102569 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.112613 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.112667 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.112679 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.112697 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.112710 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.130483 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\" 6045 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 13:16:40.266908 6045 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 13:16:40.266986 6045 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 13:16:40.267017 6045 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 13:16:40.267099 6045 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 13:16:40.267122 6045 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 13:16:40.267155 6045 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 13:16:40.267158 6045 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 13:16:40.267178 6045 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 13:16:40.267187 6045 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 13:16:40.267191 6045 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 13:16:40.267198 6045 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 13:16:40.267201 6045 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 13:16:40.267215 6045 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 13:16:40.267268 6045 factory.go:656] Stopping watch factory\\\\nI0129 13:16:40.267295 6045 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.141898 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"19
2.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.155448 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.168987 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.187631 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/e
nv\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.200174 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kub
ernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.213678 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.215176 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.215248 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.215307 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.215338 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.215361 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.230769 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.243243 4787 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:52Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.318215 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.318258 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.318267 4787 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.318283 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.318293 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.420268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.420302 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.420311 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.420324 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.420334 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.524657 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.524709 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.524718 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.524737 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.524746 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.627352 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.627403 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.627414 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.627436 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.627470 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.730105 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.730139 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.730148 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.730165 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.730176 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.832825 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.832865 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.832875 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.832891 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.832904 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.934957 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.935007 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.935021 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.935041 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.935056 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:52Z","lastTransitionTime":"2026-01-29T13:16:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:52 crc kubenswrapper[4787]: I0129 13:16:52.962625 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 17:20:18.008389444 +0000 UTC Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.036927 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.036992 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.037003 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.037025 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.037037 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.139803 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.139843 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.139852 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.139866 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.139876 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.243045 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.243106 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.243123 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.243148 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.243185 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.346592 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.346640 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.346655 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.346673 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.346683 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.450238 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.450282 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.450297 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.450315 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.450325 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.555119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.555200 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.555225 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.555258 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.555284 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.658418 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.658980 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.659207 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.659427 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.659639 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.762847 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.762902 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.762916 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.762961 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.762983 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.866814 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.867319 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.867513 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.867663 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.867832 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.962959 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 01:32:02.11158146 +0000 UTC Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.972713 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.972775 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.972796 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.972826 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.972849 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:53Z","lastTransitionTime":"2026-01-29T13:16:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.985153 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.985311 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:53 crc kubenswrapper[4787]: E0129 13:16:53.985371 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:53 crc kubenswrapper[4787]: E0129 13:16:53.985675 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.985686 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:53 crc kubenswrapper[4787]: I0129 13:16:53.986381 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:53 crc kubenswrapper[4787]: E0129 13:16:53.986447 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:53 crc kubenswrapper[4787]: E0129 13:16:53.986609 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.076077 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.076124 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.076136 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.076160 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.076176 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.180407 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.180477 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.180492 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.180511 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.180522 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.284341 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.284427 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.284448 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.284555 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.284625 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.388404 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.388900 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.389048 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.389192 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.389332 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.492163 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.492522 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.492655 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.492756 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.492838 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.595687 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.595729 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.595739 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.595756 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.595768 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.698678 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.698734 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.698746 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.698765 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.698778 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.802763 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.802814 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.802828 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.802854 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.802870 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.905928 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.906002 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.906031 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.906060 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.906076 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:54Z","lastTransitionTime":"2026-01-29T13:16:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:54 crc kubenswrapper[4787]: I0129 13:16:54.964516 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 09:56:32.426578094 +0000 UTC Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.008825 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.008901 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.008915 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.008935 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.008949 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.111376 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.111414 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.111423 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.111441 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.111462 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.213998 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.214093 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.214107 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.214129 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.214576 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.313314 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.317091 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.317190 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.317207 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.317229 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.317266 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.326203 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.333489 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.349056 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.373369 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1
182dd3e0a6174e56d85df32b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e59abfbd3506b7527c45caee5227c453148811a1e511896c4cff3b4246ec2d83\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\" 6045 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0129 13:16:40.266908 6045 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0129 13:16:40.266986 6045 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0129 13:16:40.267017 6045 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0129 13:16:40.267099 6045 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0129 13:16:40.267122 6045 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0129 13:16:40.267155 6045 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0129 13:16:40.267158 6045 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0129 13:16:40.267178 6045 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0129 13:16:40.267187 6045 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0129 13:16:40.267191 6045 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0129 13:16:40.267198 6045 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0129 13:16:40.267201 6045 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0129 13:16:40.267215 6045 handler.go:208] Removed *v1.Node event handler 2\\\\nI0129 13:16:40.267268 6045 factory.go:656] Stopping watch factory\\\\nI0129 13:16:40.267295 6045 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared 
informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hos
tIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.384627 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.395939 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.407684 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.414905 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.414973 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.414989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.415010 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.415022 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.421889 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.426662 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.430607 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.430712 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.430770 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.430833 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.430911 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.436844 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd
29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.443579 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.451670 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.452043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.452076 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.452089 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.452108 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.452120 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.462268 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.464051 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.468585 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.468619 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.468632 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.468652 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.468665 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.475204 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.480635 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.484158 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.484185 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.484195 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.484210 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.484222 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.491050 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubel
et\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.497424 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.497588 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.499113 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.499137 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.499145 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.499160 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.499169 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.506117 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.520137 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.533706 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.548745 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.602896 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.602965 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.602976 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.602995 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.603008 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.705857 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.705944 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.705957 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.706031 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.706078 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.809654 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.809701 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.809711 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.809729 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.809742 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.814137 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814298 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:17:27.814278907 +0000 UTC m=+86.575539183 (durationBeforeRetry 32s). 
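
Note on the `durationBeforeRetry 32s` entry above: the kubelet retries failed volume operations (here, an UnmountVolume for a CSI driver that is no longer registered) with exponential backoff, logging "No retries permitted until <now+delay>" between attempts. A minimal sketch of that schedule, assuming an initial 500 ms delay doubling to a cap of roughly two minutes (the observed 32 s matches 500 ms doubled six times; the exact constants are an assumption, not read from this log):

```go
// Illustrative sketch of the exponential backoff behind the
// "durationBeforeRetry 32s" lines. Constants are assumptions chosen
// to reproduce the 32s delay seen in this log.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // assumed initial delay
	maxDelay := 2 * time.Minute     // assumed cap
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d failed; no retries permitted for %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```
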
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.814360 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.814402 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.814433 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.814517 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814559 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814578 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814591 4787 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814623 4787 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814687 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:16:55 crc 
kubenswrapper[4787]: E0129 13:16:55.814637 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 13:17:27.814623705 +0000 UTC m=+86.575883981 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814713 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814734 4787 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814747 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:17:27.814721818 +0000 UTC m=+86.575982094 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814624 4787 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814773 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 13:17:27.814765409 +0000 UTC m=+86.576025685 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.814934 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
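
The repeated `object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered` errors in the surrounding entries appear to mean the kubelet's watch-based object cache has not (yet) registered those ConfigMaps/Secrets for the affected pods, so projected and secret volume setup fails and is retried on the same backoff schedule. A hedged sketch using client-go to confirm the objects at least exist server-side (the kubeconfig path is a placeholder assumption; namespace and object names are taken from the log):

```go
// Sketch: check server-side existence of the ConfigMaps named in the
// "not registered" volume errors. This does not inspect kubelet's local
// cache; it only rules out the objects being absent from the API.
package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range []string{"kube-root-ca.crt", "openshift-service-ca.crt"} {
		_, err := cs.CoreV1().ConfigMaps("openshift-network-diagnostics").
			Get(context.TODO(), name, metav1.GetOptions{})
		fmt.Printf("configmap %s: err=%v\n", name, err)
	}
}
```
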
No retries permitted until 2026-01-29 13:17:27.814884982 +0000 UTC m=+86.576145298 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.903980 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.905047 4787 scope.go:117] "RemoveContainer" containerID="6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.911490 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.911549 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.911562 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.911577 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.911586 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:55Z","lastTransitionTime":"2026-01-29T13:16:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.920305 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.941409 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.959008 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints 
registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.965439 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 06:19:57.758157984 +0000 UTC Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.973930 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.985249 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.985596 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.985877 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.985931 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.985978 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.986020 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.986136 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:55 crc kubenswrapper[4787]: E0129 13:16:55.986195 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:55 crc kubenswrapper[4787]: I0129 13:16:55.989306 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:55Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.004778 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.017349 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.017408 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.017419 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.017439 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.017462 4787 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.017485 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.032290 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.048218 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.063289 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.088735 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1
182dd3e0a6174e56d85df32b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.101025 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.117095 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:56 crc kubenswrapper[4787]: E0129 13:16:56.117445 4787 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:56 crc kubenswrapper[4787]: E0129 13:16:56.117570 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs podName:0fcadf59-74fc-4aeb-abd6-55f6061fa5b0 nodeName:}" failed. No retries permitted until 2026-01-29 13:17:12.117552285 +0000 UTC m=+70.878812561 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs") pod "network-metrics-daemon-gkrsx" (UID: "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.121380 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.121442 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.121474 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.121501 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.121517 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.132062 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.153867 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.172684 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"
192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.189898 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ent
rypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2d
e0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"
2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.203235 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:56Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.224508 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.224552 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.224563 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.224625 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.224637 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.327805 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.327854 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.327863 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.327882 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.327893 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.430977 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.431039 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.431057 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.431084 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.431105 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.534313 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.534357 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.534375 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.534401 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.534422 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.638114 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.638165 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.638181 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.638211 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.638229 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.741317 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.741351 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.741360 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.741376 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.741386 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.844011 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.844059 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.844069 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.844090 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.844104 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.947760 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.947821 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.947845 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.947873 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.947892 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:56Z","lastTransitionTime":"2026-01-29T13:16:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:56 crc kubenswrapper[4787]: I0129 13:16:56.965975 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 22:03:04.85067329 +0000 UTC Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.051190 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.051764 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.051781 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.051806 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.051821 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.154946 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.155002 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.155019 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.155045 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.155064 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.258250 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.258299 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.258308 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.258328 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.258340 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.363624 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.363709 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.363737 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.363770 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.363796 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.446067 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/1.log" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.453855 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.455184 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.467166 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.467214 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.467228 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.467261 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.467276 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.477382 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.504595 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.529416 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.563606 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e311
8606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[
{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.570549 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.570594 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.570607 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.570628 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.570642 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.579905 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.600051 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.615681 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate
has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.632996 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\":
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.658808 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\
\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748
951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.673925 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.673973 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.673985 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.674007 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.674023 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.674040 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.701115 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.720708 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.734432 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.751602 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.765851 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.777118 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.777183 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.777202 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.777226 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.777244 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.782622 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.801474 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:57Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.880138 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.880707 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.880953 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.881108 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.881252 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.967099 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 04:07:52.917784737 +0000 UTC Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.984367 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.984402 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.984411 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.984430 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.984441 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:57Z","lastTransitionTime":"2026-01-29T13:16:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.985000 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.985115 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.985050 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:57 crc kubenswrapper[4787]: E0129 13:16:57.985512 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:57 crc kubenswrapper[4787]: I0129 13:16:57.985652 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:57 crc kubenswrapper[4787]: E0129 13:16:57.985913 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:57 crc kubenswrapper[4787]: E0129 13:16:57.985721 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:57 crc kubenswrapper[4787]: E0129 13:16:57.985965 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.087495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.087559 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.087580 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.087601 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.087632 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.141872 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.157868 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.191209 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.191305 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.191379 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.191391 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.191418 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.191431 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.217400 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.242805 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[
{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.258758 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.276984 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.289029 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.294292 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.294354 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.294365 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.294383 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.294394 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.301413 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.319608 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Comple
ted\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.332908 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.348561 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.364443 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.380382 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.396101 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.397505 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.397559 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.397570 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.397584 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.397596 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.411008 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.424311 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.439846 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.459382 4787 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/2.log" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.460667 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/1.log" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.465109 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71" exitCode=1 Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.465164 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.465208 4787 scope.go:117] "RemoveContainer" containerID="6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.466337 4787 scope.go:117] "RemoveContainer" containerID="506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71" Jan 29 13:16:58 crc kubenswrapper[4787]: E0129 13:16:58.466617 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.493926 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.500660 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.500708 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.500717 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.500734 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.500748 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.513058 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.527170 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 
13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.548764 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.569281 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.585187 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.595738 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.603950 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.603988 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.604000 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.604038 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.604050 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.608717 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.620640 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.643578 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-a
piserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.658140 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.674517 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.685032 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.698044 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.706833 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.706875 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.706886 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.706905 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.706920 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.711872 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.725701 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.745603 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f0169fd5d7f35b05ecd6ec8be381d6c26c299c1182dd3e0a6174e56d85df32b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:41Z\\\",\\\"message\\\":\\\"{services.LB{Name:\\\\\\\"Service_openshift-console/console_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-console/console\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.194\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI0129 13:16:41.182491 6299 obj_retry.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nF0129 13:16:41.183468 6299 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"ht\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry 
successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a
89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:58Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.809059 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.809107 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.809118 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.809137 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:58 crc kubenswrapper[4787]: 
I0129 13:16:58.809148 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.945967 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.946403 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.946512 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.946605 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.946672 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:58Z","lastTransitionTime":"2026-01-29T13:16:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:58 crc kubenswrapper[4787]: I0129 13:16:58.967713 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 01:37:36.889459386 +0000 UTC Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.050374 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.050431 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.050483 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.050520 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.050542 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.153940 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.153994 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.154005 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.154029 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.154042 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.255965 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.256009 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.256017 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.256031 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.256041 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.358585 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.358803 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.358860 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.358890 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.358912 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.478777 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.478838 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.478850 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.478876 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.478891 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.481381 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/2.log" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.485127 4787 scope.go:117] "RemoveContainer" containerID="506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71" Jan 29 13:16:59 crc kubenswrapper[4787]: E0129 13:16:59.485342 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.502671 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.517118 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.535545 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.554950 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\
\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748
951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.567529 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.577968 4787 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.581833 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.581875 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.581886 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.581910 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.581922 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.600786 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.614407 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\
"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.626220 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccb
f3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.636790 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.650926 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.661329 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.672004 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.687791 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.687849 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.687859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.687877 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.687894 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.689167 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.717876 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.738236 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.751681 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:16:59Z is after 2025-08-24T17:21:41Z" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.790609 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.790666 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.790680 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.790704 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.790719 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.894090 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.894164 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.894179 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.894205 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.894230 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.969332 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 15:01:16.566501989 +0000 UTC Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.985068 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.985150 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.985150 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.985235 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:16:59 crc kubenswrapper[4787]: E0129 13:16:59.985425 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:16:59 crc kubenswrapper[4787]: E0129 13:16:59.985558 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:16:59 crc kubenswrapper[4787]: E0129 13:16:59.985776 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:16:59 crc kubenswrapper[4787]: E0129 13:16:59.986008 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.996934 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.997025 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.997043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.997066 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:16:59 crc kubenswrapper[4787]: I0129 13:16:59.997086 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:16:59Z","lastTransitionTime":"2026-01-29T13:16:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.099607 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.099644 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.099652 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.099666 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.099674 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.202674 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.202755 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.202775 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.202805 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.202829 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.307314 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.307392 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.307413 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.307446 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.307505 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.411252 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.411316 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.411335 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.411368 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.411387 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.517579 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.517674 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.517705 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.517740 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.517770 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.620600 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.620656 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.620668 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.620689 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.620701 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.724144 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.724203 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.724221 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.724248 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.724268 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
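
The patch payloads in the "Failed to update status for pod" entries earlier in this log are hard to read because klog quotes the whole err value, escaping every inner quote. That quoting follows Go's strconv escaping, so a payload copied out of a log line can be unquoted and pretty-printed. A sketch with a shortened stand-in payload (the real ones run to several kilobytes, and depending on how many quoting layers you copy, Unquote may need to be applied more than once):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"strconv"
)

func main() {
	// Shortened stand-in for the escaped status patches in the entries above.
	escaped := `"{\"metadata\":{\"uid\":\"7c7691a3-b0da-48f8-a49a-38b63841eb75\"},\"status\":{\"phase\":\"Running\"}}"`

	// klog renders the patch as a Go-quoted string; strconv.Unquote reverses it.
	raw, err := strconv.Unquote(escaped)
	if err != nil {
		log.Fatalf("unquote: %v", err)
	}

	var pretty bytes.Buffer
	if err := json.Indent(&pretty, []byte(raw), "", "  "); err != nil {
		log.Fatalf("indent: %v", err)
	}
	fmt.Println(pretty.String())
}
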
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.827635 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.828076 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.828222 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.828416 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.828627 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.932743 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.933160 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.933287 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.933406 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.933542 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:00Z","lastTransitionTime":"2026-01-29T13:17:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:00 crc kubenswrapper[4787]: I0129 13:17:00.970479 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 12:08:26.641724587 +0000 UTC
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.037363 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.037822 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.037998 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.038151 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.038318 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.142157 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.142222 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.142239 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.142266 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.142285 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
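The certificate_manager.go:356 entry above logs two times: the serving certificate's expiration and a "rotation deadline" that client-go's certificate manager picks at a jittered point inside the certificate's validity window. Note that the deadlines logged in this capture (2025-12-29 here, 2025-12-23 a second later) are already behind the log clock of 2026-01-29, so rotation is overdue. The sketch below shows how such a deadline can be derived; the 70–90% window is an assumption modeled on client-go's jitter, and NotBefore is invented because the log prints only the expiration (consult k8s.io/client-go/util/certificate for the authoritative logic).

// rotation.go — a sketch of jittered rotation-deadline selection in the
// spirit of client-go's certificate manager, which the kubelet uses for
// kubelet-serving certificates.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a uniformly random point between 70% and 90%
// of the certificate's lifetime (assumed window, modeled on client-go).
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// NotAfter is taken from the log line above; NotBefore is hypothetical,
	// since the log does not print it.
	notAfter, err := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-02-24 05:53:03 +0000 UTC")
	if err != nil {
		panic(err)
	}
	notBefore := notAfter.Add(-90 * 24 * time.Hour) // assumed 90-day lifetime

	deadline := rotationDeadline(notBefore, notAfter)
	fmt.Println("rotation deadline:", deadline)
	if time.Now().After(deadline) {
		fmt.Println("deadline has passed: rotation is due")
	}
}

Re-drawing the jitter on each evaluation would also explain why two certificate_manager entries logged a second apart report different deadlines.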
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.246013 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.246412 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.246747 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.246908 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.247044 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.350759 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.351109 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.351208 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.351311 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.351399 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.454694 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.454780 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.454790 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.454807 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.454819 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.557731 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.557969 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.558110 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.558232 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.558367 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.662157 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.662215 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.662234 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.662264 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.662284 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.766010 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.766077 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.766098 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.766128 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.766150 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
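The condition={...} payload in the setters.go:603 entries above is a serialized core/v1 NodeCondition. The sketch below rebuilds the same JSON object, assuming k8s.io/api and k8s.io/apimachinery are available on the module path; the field names and JSON tags come from the public API, and the timestamp is copied from the entries above.

// condition.go — reconstructs the condition payload seen in the
// "Node became not ready" entries, assuming the k8s.io/api and
// k8s.io/apimachinery modules are available.
package main

import (
	"encoding/json"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ts := metav1.NewTime(time.Date(2026, 1, 29, 13, 17, 1, 0, time.UTC))
	cond := corev1.NodeCondition{
		Type:               corev1.NodeReady,
		Status:             corev1.ConditionFalse,
		LastHeartbeatTime:  ts,
		LastTransitionTime: ts,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	out, err := json.Marshal(cond)
	if err != nil {
		panic(err)
	}
	// Prints the same JSON object the kubelet logs as condition={...}.
	fmt.Println(string(out))
}

In these entries lastHeartbeatTime and lastTransitionTime advance together roughly every 100ms, which appears consistent with the status loop re-reporting the same Ready=False condition on each pass while the node never actually transitions.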
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.872329 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.872440 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.872510 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.872549 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.872574 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.971441 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 10:42:04.797593598 +0000 UTC
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.978061 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.978533 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.978784 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.979006 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.979226 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:01Z","lastTransitionTime":"2026-01-29T13:17:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.985684 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.985755 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.985699 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:01 crc kubenswrapper[4787]: I0129 13:17:01.985948 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:01 crc kubenswrapper[4787]: E0129 13:17:01.985939 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:01 crc kubenswrapper[4787]: E0129 13:17:01.986073 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:01 crc kubenswrapper[4787]: E0129 13:17:01.986156 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:01 crc kubenswrapper[4787]: E0129 13:17:01.986213 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.006400 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"container
ID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\"
:[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 
13:17:02.018999 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.034336 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.047258 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.067064 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.080897 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.083391 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.083432 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.083447 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.083486 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.083498 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.096377 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.112369 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.127780 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.139089 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.153063 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.164797 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.185966 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.186007 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.186019 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.186040 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.186055 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.191173 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.205203 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.222965 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.233216 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.243399 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:02Z is after 2025-08-24T17:21:41Z" Jan 29 
13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.289516 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.289563 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.289581 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.289609 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.289628 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.393034 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.393110 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.393126 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.393148 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.393164 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.496248 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.496879 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.496902 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.496930 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.496948 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.600449 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.600539 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.600563 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.600608 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.600629 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.703611 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.703670 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.703685 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.703707 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.703724 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.806806 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.806842 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.806851 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.806871 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.806917 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.910972 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.911031 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.911047 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.911076 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.911094 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:02Z","lastTransitionTime":"2026-01-29T13:17:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:02 crc kubenswrapper[4787]: I0129 13:17:02.972034 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 19:29:41.478815692 +0000 UTC Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.014310 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.014716 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.014887 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.015046 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.015215 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:03Z","lastTransitionTime":"2026-01-29T13:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.118445 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.118575 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.118604 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.118631 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.118649 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:03Z","lastTransitionTime":"2026-01-29T13:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the same five-line node-status block repeats, verbatim except for timestamps, at 13:17:03.222, 13:17:03.326, 13:17:03.429, 13:17:03.533, 13:17:03.637, 13:17:03.741, 13:17:03.846 and 13:17:03.950 ...]
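The condition object printed by setters.go is plain JSON and mirrors the Ready condition the kubelet publishes on the Node object. A minimal stdlib-only Go sketch, using a hypothetical local nodeCondition struct as a stand-in for the real k8s.io/api/core/v1 NodeCondition type, decodes one of the payloads above:

package main

import (
	"encoding/json"
	"fmt"
)

// nodeCondition mirrors the fields of the condition object logged by
// setters.go above; it is a local stand-in for the Kubernetes API
// type so the sketch runs with the standard library alone.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Condition payload copied from one "Node became not ready" entry.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:03Z","lastTransitionTime":"2026-01-29T13:17:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s reason=%s\n", c.Type, c.Status, c.Reason)
	// Output: Ready=False reason=KubeletNotReady
}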
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.973024 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 18:46:29.856069697 +0000 UTC
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.992324 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.992379 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.992504 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:03 crc kubenswrapper[4787]: E0129 13:17:03.992577 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:03 crc kubenswrapper[4787]: I0129 13:17:03.992794 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:03 crc kubenswrapper[4787]: E0129 13:17:03.992856 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:03 crc kubenswrapper[4787]: E0129 13:17:03.992835 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:03 crc kubenswrapper[4787]: E0129 13:17:03.993071 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
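Every "Error syncing pod, skipping" entry above carries the same network-not-ready error, just for a different pod. A rough triage sketch in Go, assuming the klog field layout shown in this log (entries that wrap across physical lines will only match on their first line), that lists each failing pod with its error:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches the err="..." pod="..." fields of the pod_workers.go lines
// above; neither value contains an embedded double quote here.
var syncErr = regexp.MustCompile(`"Error syncing pod, skipping" err="([^"]+)" pod="([^"]+)"`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // entries in this log run long
	for sc.Scan() {
		if m := syncErr.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("%s\t%s\n", m[2], m[1]) // pod, then error
		}
	}
}

Fed this excerpt on stdin, it prints the four pods (network-check-source, network-check-target, networking-console-plugin, network-metrics-daemon) each paired with the identical CNI error.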
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.055866 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.055945 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.055969 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.056003 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.056028 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.159240 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.159315 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.159334 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.159579 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.159597 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.263570 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.263633 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.263650 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.263677 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.263695 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.367381 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.367497 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.367517 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.367545 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.367564 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.471166 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.471254 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.471280 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.471311 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.471337 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.575301 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.575937 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.576148 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.576309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.576487 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.680086 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.680622 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.680767 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.680883 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.680971 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.784962 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.785005 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.785014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.785032 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.785041 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.887945 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.888518 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.888699 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.888886 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.889059 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.973716 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 11:05:40.842125476 +0000 UTC Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.993074 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.993132 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.993151 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.993187 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:04 crc kubenswrapper[4787]: I0129 13:17:04.993208 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:04Z","lastTransitionTime":"2026-01-29T13:17:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.096637 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.096687 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.096700 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.096721 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.096737 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.199622 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.199696 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.199710 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.199727 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.199757 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.302865 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.302935 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.302959 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.302989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.303010 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.406722 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.407072 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.407388 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.407586 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.407717 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.512253 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.512330 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.512343 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.512367 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.512395 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.614839 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.614881 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.614928 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.614946 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.614959 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.717802 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.717853 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.717869 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.717898 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.717917 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.820697 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.820746 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.820760 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.820779 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.820791 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.843734 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.843814 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.843834 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.843866 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.843892 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
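The two certificate_manager.go entries above show a kubelet-serving certificate that is still valid until 2026-02-24 but whose rotation deadline (apparently re-randomized between evaluations, hence the two different values) already lies in the past. A stdlib-only Go sketch with the timestamps copied from the 13:17:04 entry:

package main

import (
	"fmt"
	"time"
)

// klog prints these timestamps in Go's default time.Time format.
const layout = "2006-01-02 15:04:05 -0700 MST"

func mustParse(v string) time.Time {
	t, err := time.Parse(layout, v) // time.Parse accepts the fractional seconds even without them in the layout
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	expiry := mustParse("2026-02-24 05:53:03 +0000 UTC")
	deadline := mustParse("2025-12-18 11:05:40.842125476 +0000 UTC")
	logClock := mustParse("2026-01-29 13:17:04 +0000 UTC") // the log's own wall clock

	fmt.Println("serving cert still valid:", logClock.Before(expiry))  // true
	fmt.Println("rotation already due:", logClock.After(deadline))     // true
	fmt.Println("overdue by:", logClock.Sub(deadline).Round(time.Hour)) // ~1010h
}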
Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.868106 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[... image inventory elided; ~50 entries from quay.io/openshift-release-dev, registry.redhat.io and quay.io/crcont ...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:05Z is after 2025-08-24T17:21:41Z"
[... another run of the five-line node-status block at 13:17:05.880 ...]
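The patch failure above bottoms out in a single TLS error: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a certificate that expired on 2025-08-24, so every node-status patch is rejected. A diagnostic sketch (it has to run on the node itself, since the endpoint is loopback-only) that fetches the presented certificate and prints its validity window:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Endpoint taken from the webhook error above. Verification is
	// skipped deliberately: the goal is to read the expired certificate,
	// not to trust it.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject:  ", cert.Subject)
	fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
	fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
	fmt.Println("expired:  ", time.Now().After(cert.NotAfter))
}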
event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.880151 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.880231 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.880251 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.901870 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:05Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.907107 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.907150 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.907161 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.907179 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.907190 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.921314 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:05Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.924950 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.925005 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.925017 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.925033 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.925044 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.939251 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:05Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.943419 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.943480 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
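Every failed status patch in this stretch of the log shares one root cause, stated at the end of each err string: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 is serving a certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-29. Below is a minimal Go sketch for confirming that from the node itself; the only input taken from the log is the endpoint address, and the handshake deliberately skips chain verification so the expired certificate can still be read:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Address taken from the webhook Post URL in the error above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // read the cert even though it is expired; do not trust it
	})
	if err != nil {
		log.Fatalf("handshake failed: %v", err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if now := time.Now(); now.After(cert.NotAfter) {
		// Mirrors the x509 message in the log: "certificate has expired or is not yet valid".
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	}
}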
event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.943495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.943517 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.943531 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.957151 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:05Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.957332 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.959193 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
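The 13:17:05.957332 entry above marks the kubelet exhausting its retry budget for one status sync: each attempt dies at the same webhook, and after a fixed number of consecutive failures the sync is abandoned until the next interval. Upstream kubelet caps this with a small constant (nodeStatusUpdateRetry, historically 5); that value and the patch callback below are assumptions for illustration, not something this log states:

package main

import (
	"errors"
	"fmt"
	"log"
)

// nodeStatusUpdateRetry mirrors the upstream kubelet constant (assumed to be 5 here).
const nodeStatusUpdateRetry = 5

// updateNodeStatus retries the status PATCH a bounded number of times,
// matching the "will retry" / "exceeds retry count" pairing in the log.
func updateNodeStatus(patch func() error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patch(); err != nil {
			log.Printf("Error updating node status, will retry: %v", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	// Every attempt fails the same way the webhook call fails above.
	err := updateNodeStatus(func() error {
		return errors.New("failed calling webhook: certificate has expired")
	})
	fmt.Println(err)
}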
event="NodeHasSufficientMemory" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.959245 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.959256 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.959276 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.959288 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:05Z","lastTransitionTime":"2026-01-29T13:17:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.974884 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 02:45:04.437170527 +0000 UTC Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.985196 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.985313 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.985197 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.985386 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:05 crc kubenswrapper[4787]: I0129 13:17:05.985421 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.985518 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.985612 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:05 crc kubenswrapper[4787]: E0129 13:17:05.986271 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.063177 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.063245 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.063262 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.063284 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.063303 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.166358 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.166415 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.166428 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.166477 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.166504 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.270244 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.270286 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.270297 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.270315 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.270329 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.373859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.373905 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.373918 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.373938 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.373952 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.477888 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.477960 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.477983 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.478095 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.478124 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.581668 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.581709 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.581722 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.581740 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.581752 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.684852 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.684886 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.684898 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.684915 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.684928 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.786958 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.787001 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.787010 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.787025 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.787038 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.890047 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.890160 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.890194 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.890230 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.890252 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.975077 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 18:24:51.14041775 +0000 UTC Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.993422 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.993476 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.993489 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.993505 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:06 crc kubenswrapper[4787]: I0129 13:17:06.993516 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:06Z","lastTransitionTime":"2026-01-29T13:17:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.097080 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.097125 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.097136 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.097167 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.097182 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.200379 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.200435 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.200477 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.200496 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.200506 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.303639 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.303692 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.303708 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.303730 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.303747 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.406404 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.406435 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.406445 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.406477 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.406487 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.510547 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.510598 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.510622 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.510648 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.510665 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.614637 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.614687 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.614699 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.614718 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.614731 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.719261 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.719324 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.719341 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.719366 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.719383 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.823424 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.823502 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.823516 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.823541 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.823557 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.926127 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.926171 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.926182 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.926196 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.926207 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:07Z","lastTransitionTime":"2026-01-29T13:17:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.975734 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 06:24:02.321398748 +0000 UTC
Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.985187 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.985328 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.985228 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:07 crc kubenswrapper[4787]: I0129 13:17:07.985428 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:07 crc kubenswrapper[4787]: E0129 13:17:07.985543 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:07 crc kubenswrapper[4787]: E0129 13:17:07.985627 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:07 crc kubenswrapper[4787]: E0129 13:17:07.986199 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:07 crc kubenswrapper[4787]: E0129 13:17:07.986279 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.029579 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.029635 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.029648 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.029676 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.029693 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.132815 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.132869 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.132888 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.132914 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.132936 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.235711 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.235810 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.235830 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.235856 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.235876 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.339357 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.339419 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.339432 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.339473 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.339490 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.442184 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.442253 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.442264 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.442284 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.442296 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.545167 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.545230 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.545245 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.545267 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.545280 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.647960 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.648326 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.648406 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.648516 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.648597 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.751612 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.751661 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.751673 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.751691 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.751703 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.855433 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.855938 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.856027 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.856117 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.856214 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.958801 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.958872 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.958891 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.958920 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.958946 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:08Z","lastTransitionTime":"2026-01-29T13:17:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:08 crc kubenswrapper[4787]: I0129 13:17:08.976569 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 18:08:41.465095434 +0000 UTC Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.062031 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.062102 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.062114 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.062138 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.062151 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.165928 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.165999 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.166024 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.166059 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.166083 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.269337 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.269429 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.269497 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.269540 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.269565 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.372903 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.372978 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.373005 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.373037 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.373063 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.476841 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.476900 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.476911 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.476929 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.476941 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.579616 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.579665 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.579675 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.579693 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.579704 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.683582 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.683658 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.683683 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.683713 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.683739 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.787083 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.787141 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.787156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.787174 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.787185 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.890124 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.890172 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.890185 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.890205 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.890221 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.977780 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 01:47:13.717610804 +0000 UTC
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.985233 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.985257 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.985332 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.985353 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:09 crc kubenswrapper[4787]: E0129 13:17:09.985402 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:09 crc kubenswrapper[4787]: E0129 13:17:09.985534 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:09 crc kubenswrapper[4787]: E0129 13:17:09.985635 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:09 crc kubenswrapper[4787]: E0129 13:17:09.985829 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.992120 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.992165 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.992176 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.992196 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:09 crc kubenswrapper[4787]: I0129 13:17:09.992210 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:09Z","lastTransitionTime":"2026-01-29T13:17:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.095264 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.095325 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.095345 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.095366 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.095380 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.205043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.205130 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.205152 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.205183 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.205203 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.308770 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.308821 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.308833 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.308852 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.308863 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.412233 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.412287 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.412298 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.412320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.412334 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.515818 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.515875 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.515887 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.515914 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.515928 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.619017 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.619061 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.619071 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.619092 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.619102 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.721922 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.721964 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.721974 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.721994 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.722007 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.825023 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.825273 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.825304 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.825325 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.825357 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.928531 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.928592 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.928604 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.928628 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.928641 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:10Z","lastTransitionTime":"2026-01-29T13:17:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:10 crc kubenswrapper[4787]: I0129 13:17:10.977969 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 15:12:33.780635772 +0000 UTC Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.031738 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.031796 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.031812 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.031835 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.031851 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.134987 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.135078 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.135106 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.135142 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.135167 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.238607 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.238668 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.238682 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.238705 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.238719 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.341998 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.342094 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.342123 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.342154 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.342173 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.444945 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.445016 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.445039 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.445068 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.445089 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.548339 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.548403 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.548417 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.548440 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.548477 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.651920 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.651974 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.651983 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.652024 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.652037 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.754893 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.754950 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.754964 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.754986 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.754999 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.857689 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.857738 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.857749 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.857767 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.857779 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.960727 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.960790 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.960813 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.960841 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.960859 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:11Z","lastTransitionTime":"2026-01-29T13:17:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.979100 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 17:06:48.616027729 +0000 UTC
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.985423 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.985511 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:11 crc kubenswrapper[4787]: E0129 13:17:11.985782 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.985804 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.985895 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:11 crc kubenswrapper[4787]: E0129 13:17:11.986185 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:11 crc kubenswrapper[4787]: E0129 13:17:11.986243 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:11 crc kubenswrapper[4787]: E0129 13:17:11.986647 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.986943 4787 scope.go:117] "RemoveContainer" containerID="506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71" Jan 29 13:17:11 crc kubenswrapper[4787]: E0129 13:17:11.987155 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" Jan 29 13:17:11 crc kubenswrapper[4787]: I0129 13:17:11.997333 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:11Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.009968 4787 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.020405 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.032899 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.050118 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e311
8606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.062672 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.064185 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.064229 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.064241 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.064263 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.064276 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.073622 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.083099 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 
13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.106633 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.117841 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.131301 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.135469 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:12 crc kubenswrapper[4787]: E0129 13:17:12.135649 4787 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:17:12 crc kubenswrapper[4787]: E0129 13:17:12.135722 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs podName:0fcadf59-74fc-4aeb-abd6-55f6061fa5b0 nodeName:}" failed. No retries permitted until 2026-01-29 13:17:44.135704043 +0000 UTC m=+102.896964319 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs") pod "network-metrics-daemon-gkrsx" (UID: "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.142480 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.154181 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.166943 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.168028 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.168089 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.168103 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.168126 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.168140 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.180692 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9
b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.222364 4787 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.258733 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:12Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.271030 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.271067 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.271082 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.271102 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.271115 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.373813 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.373909 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.373965 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.373990 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.374045 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.477170 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.477228 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.477243 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.477268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.477285 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.580254 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.580297 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.580313 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.580333 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.580348 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.683269 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.683320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.683334 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.683352 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.683363 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.786816 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.786908 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.786935 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.786971 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.786998 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.889832 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.889894 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.889911 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.889944 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.889965 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.979795 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 14:10:51.170938676 +0000 UTC Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.992618 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.992674 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.992689 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.992715 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:12 crc kubenswrapper[4787]: I0129 13:17:12.992730 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:12Z","lastTransitionTime":"2026-01-29T13:17:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.095083 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.095140 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.095157 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.095178 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.095191 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.198042 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.198094 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.198103 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.198121 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.198132 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.301515 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.301594 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.301614 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.301650 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.301676 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.406157 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.406214 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.406227 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.406248 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.406262 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.508781 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.508841 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.508854 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.508878 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.508893 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.611769 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.611823 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.611836 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.611862 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.611877 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.714812 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.715182 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.715314 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.715505 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.715639 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.818644 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.818696 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.818708 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.818733 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.818746 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.921697 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.921770 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.921783 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.921803 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.921817 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:13Z","lastTransitionTime":"2026-01-29T13:17:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.980086 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 21:31:54.237210253 +0000 UTC Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.985595 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.985595 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.985596 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:13 crc kubenswrapper[4787]: I0129 13:17:13.985622 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:13 crc kubenswrapper[4787]: E0129 13:17:13.985872 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:13 crc kubenswrapper[4787]: E0129 13:17:13.986225 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:13 crc kubenswrapper[4787]: E0129 13:17:13.986320 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:13 crc kubenswrapper[4787]: E0129 13:17:13.986442 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.024799 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.025086 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.025251 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.025408 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.025564 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.128192 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.128265 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.128277 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.128303 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.128319 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.231552 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.231645 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.231670 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.231708 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.231732 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.335333 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.335777 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.335998 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.336217 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.336442 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.439884 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.439937 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.439951 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.439974 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.439991 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.546849 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.546938 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.546961 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.546992 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.547015 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.548853 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/0.log" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.549172 4787 generic.go:334] "Generic (PLEG): container finished" podID="d2526766-68ea-4959-a656-b0c68c754890" containerID="c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28" exitCode=1 Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.549209 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j6wn4" event={"ID":"d2526766-68ea-4959-a656-b0c68c754890","Type":"ContainerDied","Data":"c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.550141 4787 scope.go:117] "RemoveContainer" containerID="c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.568520 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.585738 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.597738 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.620573 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e311
8606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.630623 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.642794 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.649398 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.649476 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.649494 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.649515 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.649531 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.654748 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.668026 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 
13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.685838 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.700155 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.713624 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.726942 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.741051 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"2026-01-29T13:16:28+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164\\\\n2026-01-29T13:16:28+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164 to /host/opt/cni/bin/\\\\n2026-01-29T13:16:29Z [verbose] multus-daemon started\\\\n2026-01-29T13:16:29Z [verbose] Readiness Indicator file check\\\\n2026-01-29T13:17:14Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.753602 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.753641 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.753651 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.753669 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.753680 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.756330 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.770675 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.782586 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.796066 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:14Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.855333 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.855369 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.855381 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.855398 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.855408 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.958264 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.958361 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.958373 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.958387 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.958400 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:14Z","lastTransitionTime":"2026-01-29T13:17:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:14 crc kubenswrapper[4787]: I0129 13:17:14.981131 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 02:30:19.035302377 +0000 UTC Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.061689 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.061742 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.061757 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.061777 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.061791 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.165294 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.165320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.165330 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.165349 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.165358 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.269946 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.270005 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.270022 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.270043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.270058 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.373051 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.373119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.373132 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.373160 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.373175 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.476303 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.476347 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.476363 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.476383 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.476397 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.556482 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/0.log" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.556553 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j6wn4" event={"ID":"d2526766-68ea-4959-a656-b0c68c754890","Type":"ContainerStarted","Data":"1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.573319 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.578204 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.578238 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.578249 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.578268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.578280 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.585858 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.597817 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.617663 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.628014 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.639560 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.650098 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.663439 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 
13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.680963 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.681100 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 
13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.681130 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.681142 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.681159 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.681173 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.693449 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.714536 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"2026-01-29T13:16:28+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164\\\\n2026-01-29T13:16:28+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164 to /host/opt/cni/bin/\\\\n2026-01-29T13:16:29Z [verbose] multus-daemon started\\\\n2026-01-29T13:16:29Z [verbose] Readiness Indicator file check\\\\n2026-01-29T13:17:14Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.735953 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.752017 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.770546 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.783594 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.783620 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.783629 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.783645 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.783655 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.788050 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.806825 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.825094 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:15Z is after 2025-08-24T17:21:41Z"
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.888495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.888626 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.888758 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.888875 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.981271 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 11:20:03.910140298 +0000 UTC Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.985155 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.985220 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:15 crc kubenswrapper[4787]: E0129 13:17:15.985291 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:15 crc kubenswrapper[4787]: E0129 13:17:15.985359 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.985416 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:15 crc kubenswrapper[4787]: E0129 13:17:15.985497 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.985623 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:15 crc kubenswrapper[4787]: E0129 13:17:15.985953 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.992364 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.992429 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.992472 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.992494 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:15 crc kubenswrapper[4787]: I0129 13:17:15.992512 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:15Z","lastTransitionTime":"2026-01-29T13:17:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.097123 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.097200 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.097221 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.097245 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.097267 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.199660 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.200085 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.200116 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.200153 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.200176 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.303765 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.303830 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.303847 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.303876 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.303900 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.312593 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.312643 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.312660 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.312682 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.312698 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: E0129 13:17:16.334773 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:16Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.344078 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.344133 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.344146 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.344166 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.344182 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: E0129 13:17:16.360333 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:16Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.365623 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.365697 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.365724 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.365756 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.365781 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: E0129 13:17:16.385826 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:16Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.393516 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.393605 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.393627 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.393652 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.393671 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: E0129 13:17:16.412401 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:16Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.417505 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.417582 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.417593 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.417618 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.417630 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: E0129 13:17:16.434553 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:16Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:16 crc kubenswrapper[4787]: E0129 13:17:16.434711 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.438150 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.438205 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.438225 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.438252 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.438274 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.540910 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.540979 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.540998 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.541032 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.541052 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.644026 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.644083 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.644099 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.644123 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.644139 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.746775 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.746832 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.746845 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.746866 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.746880 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.850582 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.850631 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.850646 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.850667 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.850680 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.954086 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.954137 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.954156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.954184 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.954206 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:16Z","lastTransitionTime":"2026-01-29T13:17:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:16 crc kubenswrapper[4787]: I0129 13:17:16.983155 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 08:59:03.417923938 +0000 UTC Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.057650 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.057728 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.057749 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.058119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.058156 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.161065 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.161109 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.161140 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.161190 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.161204 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.263942 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.264004 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.264022 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.264050 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.264071 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.367991 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.368065 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.368084 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.368120 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.368142 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.471420 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.471767 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.471858 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.471947 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.472005 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.574918 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.574995 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.575013 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.575043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.575063 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.683008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.683086 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.683096 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.683117 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.683130 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.786423 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.786499 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.786518 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.786546 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.786562 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.889679 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.889711 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.889719 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.889736 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.889769 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.983897 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 22:50:36.326762294 +0000 UTC Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.985287 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.985391 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:17 crc kubenswrapper[4787]: E0129 13:17:17.985495 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.985311 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.985589 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:17 crc kubenswrapper[4787]: E0129 13:17:17.985621 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:17 crc kubenswrapper[4787]: E0129 13:17:17.985849 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:17 crc kubenswrapper[4787]: E0129 13:17:17.986019 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.992166 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.992209 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.992221 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.992239 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:17 crc kubenswrapper[4787]: I0129 13:17:17.992251 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:17Z","lastTransitionTime":"2026-01-29T13:17:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.095200 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.095242 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.095251 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.095268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.095279 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.197960 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.198003 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.198013 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.198030 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.198046 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.302056 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.302123 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.302134 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.302156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.302170 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.405703 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.405768 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.405787 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.405813 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.405831 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.508641 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.508690 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.508700 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.508718 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.508728 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.612756 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.612833 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.612855 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.612886 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.612909 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.716988 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.717449 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.717680 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.717919 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.718101 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.821224 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.821620 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.821803 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.821944 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.822068 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.926279 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.926368 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.926395 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.926439 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.926503 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:18Z","lastTransitionTime":"2026-01-29T13:17:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:18 crc kubenswrapper[4787]: I0129 13:17:18.984611 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 16:11:38.629537445 +0000 UTC
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.029676 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.030542 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.030772 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.031068 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.031266 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.135142 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.135204 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.135226 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.135254 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.135274 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
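The Ready=False condition repeated above is raised because the container runtime reports no CNI configuration on disk. A minimal Go sketch of that kind of check, assuming a plain scan of /etc/kubernetes/cni/net.d/ for *.conf, *.conflist, and *.json files; the real validation lives in CRI-O/libcni, not in this snippet:

```go
// cnicheck.go: report whether any CNI network configuration exists.
// A sketch only, assuming a plain extension scan of the directory;
// libcni applies the same filter but also parses the file contents.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err // e.g. the directory is missing entirely
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		// Mirrors the condition the kubelet keeps logging above.
		fmt.Println("container runtime network not ready: no CNI configuration file")
		return
	}
	fmt.Println("CNI configuration present")
}
```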
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.239038 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.239121 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.239140 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.239174 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.239199 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.342733 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.342781 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.342793 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.342815 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.342833 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.447477 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.447553 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.447570 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.447599 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.447616 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.550590 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.550659 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.550678 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.550710 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.550732 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.655133 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.655203 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.655224 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.655254 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.655278 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.758235 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.758327 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.758352 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.758388 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.758414 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.862171 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.862255 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.862279 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.862309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.862329 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.966139 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.966203 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.966221 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.966247 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.966269 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:19Z","lastTransitionTime":"2026-01-29T13:17:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.985175 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 05:31:59.571019734 +0000 UTC
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.985291 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.985342 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.985383 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:19 crc kubenswrapper[4787]: I0129 13:17:19.985403 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
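The certificate_manager records above pick a rotation deadline at a randomized point inside the serving certificate's lifetime; note that the deadlines logged here (2025-12-07, 2026-01-07) already lie in the past relative to the node clock, so the manager keeps re-evaluating. A sketch of the jittered-deadline computation, assuming the upstream client-go behavior of choosing a uniform point between 70% and 90% of the NotBefore..NotAfter span (the one-year lifetime below is an assumed example, not taken from the log):

```go
// rotationdeadline.go: pick a jittered certificate-rotation deadline.
// Sketch of the assumed client-go certificate-manager behavior: the
// deadline falls uniformly in [70%, 90%] of the cert's lifetime.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Expiration value copied from the log: 2026-02-24 05:53:03 UTC.
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	notBefore := notAfter.Add(-365 * 24 * time.Hour) // assumed lifetime
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}
```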
Jan 29 13:17:19 crc kubenswrapper[4787]: E0129 13:17:19.985489 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:19 crc kubenswrapper[4787]: E0129 13:17:19.985665 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:19 crc kubenswrapper[4787]: E0129 13:17:19.985829 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:19 crc kubenswrapper[4787]: E0129 13:17:19.985975 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.069861 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.069985 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.070012 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.070046 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.070073 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.174140 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.174209 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.174234 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.174268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.174294 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.278527 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.278590 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.278604 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.278628 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.278644 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.381627 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.381723 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.381743 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.381772 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.381791 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.485213 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.485320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.485347 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.485394 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.485422 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.589237 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.589318 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.589337 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.589365 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.589385 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.693008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.693080 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.693105 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.693137 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.693161 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.796946 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.797025 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.797058 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.797083 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.797098 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.900939 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.900993 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.901008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.901029 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.901045 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:20Z","lastTransitionTime":"2026-01-29T13:17:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:20 crc kubenswrapper[4787]: I0129 13:17:20.985907 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 23:56:33.804537539 +0000 UTC
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.004634 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.004901 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.005132 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.005303 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.005505 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.109165 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.109230 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.109247 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.109275 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.109294 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
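Every setters.go record above embeds the node's Ready condition as a JSON object. A small sketch of decoding that payload, using a local struct as a stand-in for the real k8s.io/api/core/v1.NodeCondition type (the message below is abbreviated from the log):

```go
// condition.go: decode the Ready condition JSON that setters.go logs.
// Sketch only: a local struct stands in for the Kubernetes API type.
package main

import (
	"encoding/json"
	"fmt"
)

type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied (message shortened) from a record above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("node Ready=%s since %s (%s)\n", c.Status, c.LastTransitionTime, c.Reason)
}
```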
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.213866 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.214319 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.214507 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.214746 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.215026 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.319384 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.319852 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.319999 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.320128 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.320257 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.423612 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.423685 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.423709 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.423748 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.423773 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.527368 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.527799 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.528148 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.528597 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.528940 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.631618 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.631668 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.631676 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.631695 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.631705 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.734330 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.734400 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.734419 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.734482 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.734504 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.837390 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.837744 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.837831 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.837895 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.837949 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.941895 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.941966 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.941990 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.942136 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.942168 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:21Z","lastTransitionTime":"2026-01-29T13:17:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.985091 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.985176 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.985223 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.985319 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:21 crc kubenswrapper[4787]: E0129 13:17:21.985339 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
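The status_manager failures that follow share a single root cause: the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 serves a certificate that expired on 2025-08-24, while the node clock reads 2026-01-29, so every status patch is rejected. A sketch of checking a certificate's validity window with crypto/x509; the file path is a placeholder, not taken from the log:

```go
// certwindow.go: check whether a PEM certificate is currently valid.
// Sketch only; /path/to/webhook.crt is an assumed placeholder path.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/path/to/webhook.crt") // placeholder path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		// Same failure mode the webhook calls below report:
		// "certificate has expired or is not yet valid".
		fmt.Printf("certificate invalid at %s (valid %s to %s)\n",
			now.Format(time.RFC3339), cert.NotBefore, cert.NotAfter)
		return
	}
	fmt.Println("certificate currently valid")
}
```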
Jan 29 13:17:21 crc kubenswrapper[4787]: E0129 13:17:21.985502 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:21 crc kubenswrapper[4787]: E0129 13:17:21.985671 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:21 crc kubenswrapper[4787]: E0129 13:17:21.985813 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:21 crc kubenswrapper[4787]: I0129 13:17:21.986096 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 16:48:02.361890042 +0000 UTC
Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.004729 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.025148 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 
13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.044970 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.046054 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.046115 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.046133 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.046162 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.046180 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.066648 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.087790 4787 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.105899 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.126524 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.144332 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.149309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.149373 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.149383 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.149402 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.149416 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.159339 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.173310 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.184502 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"2026-01-29T13:16:28+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164\\\\n2026-01-29T13:16:28+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164 to /host/opt/cni/bin/\\\\n2026-01-29T13:16:29Z [verbose] multus-daemon started\\\\n2026-01-29T13:16:29Z [verbose] Readiness Indicator file check\\\\n2026-01-29T13:17:14Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.199383 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.214615 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.243243 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.252580 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.252623 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.252634 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.252651 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.252665 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.257913 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.270389 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.285073 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:22Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.357382 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.357505 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.357547 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.357585 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.357608 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.461178 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.461239 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.461257 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.461298 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.461319 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.564340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.564402 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.564419 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.564445 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.564506 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.667920 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.667994 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.668014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.668046 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.668078 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.771717 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.771803 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.771824 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.771855 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.771878 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.875560 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.875643 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.875664 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.875701 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.875723 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.979059 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.979137 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.979244 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.979278 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.979296 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:22Z","lastTransitionTime":"2026-01-29T13:17:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.986853 4787 scope.go:117] "RemoveContainer" containerID="506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71" Jan 29 13:17:22 crc kubenswrapper[4787]: I0129 13:17:22.987161 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 09:49:23.66033466 +0000 UTC Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.082612 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.082727 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.082832 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.082920 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.082954 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.189064 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.189130 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.189150 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.189180 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.189200 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.292719 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.292811 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.292841 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.292871 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.292892 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.396802 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.396912 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.396934 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.396966 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.396985 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.500569 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.500617 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.500631 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.500650 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.500662 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.592279 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/2.log" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.595380 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.595861 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.603399 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.603452 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.603482 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.603504 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.603520 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.614737 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.633207 4787 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.648393 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"2026-01-29T13:16:28+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164\\\\n2026-01-29T13:16:28+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164 to /host/opt/cni/bin/\\\\n2026-01-29T13:16:29Z [verbose] multus-daemon started\\\\n2026-01-29T13:16:29Z [verbose] Readiness Indicator file check\\\\n2026-01-29T13:17:14Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.664193 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.679433 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.693019 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.712300 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.712859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.712871 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.712889 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.712902 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.713035 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.731591 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.751170 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.765168 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.781513 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.797203 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.815366 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.815409 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.815418 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.815436 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.815447 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.824904 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling 
w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:17:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.839552 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.853676 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.866346 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.878264 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:23Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.917624 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.917675 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.917685 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.917702 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.917712 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:23Z","lastTransitionTime":"2026-01-29T13:17:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.985315 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.985362 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.985359 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.985315 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:23 crc kubenswrapper[4787]: E0129 13:17:23.985490 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:23 crc kubenswrapper[4787]: E0129 13:17:23.985649 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:23 crc kubenswrapper[4787]: E0129 13:17:23.985787 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:23 crc kubenswrapper[4787]: E0129 13:17:23.985838 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:23 crc kubenswrapper[4787]: I0129 13:17:23.987535 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 03:37:55.712033801 +0000 UTC Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.020489 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.020522 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.020537 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.020554 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.020567 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.124102 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.124172 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.124190 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.124218 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.124237 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.227945 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.227991 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.228000 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.228017 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.228029 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.331204 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.331270 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.331284 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.331310 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.331330 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.433838 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.433908 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.433918 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.433938 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.433957 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.536366 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.536405 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.536423 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.536439 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.536477 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.602186 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/3.log" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.603608 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/2.log" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.607675 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf" exitCode=1 Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.607759 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.608161 4787 scope.go:117] "RemoveContainer" containerID="506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.608364 4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf" Jan 29 13:17:24 crc kubenswrapper[4787]: E0129 13:17:24.608531 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.625182 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.637287 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.638793 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.638832 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.638843 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.638860 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.638870 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.649777 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.666999 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Comple
ted\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.679670 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.696956 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.712496 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.726188 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.741670 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.741745 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.741762 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.741777 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.741788 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.742514 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"2026-01-29T13:16:28+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164\\\\n2026-01-29T13:16:28+00:00 [cnibincopy] Successfully moved files in 
/host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164 to /host/opt/cni/bin/\\\\n2026-01-29T13:16:29Z [verbose] multus-daemon started\\\\n2026-01-29T13:16:29Z [verbose] Readiness Indicator file check\\\\n2026-01-29T13:17:14Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.757477 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.772734 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.787484 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.799866 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.812434 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.824124 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.837589 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.845530 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.845595 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.845608 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.845644 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.845658 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.858685 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://506e077297195ea904b1314d3d4ca5459aa1e3118606acb3d15f288669598d71\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:16:57Z\\\",\\\"message\\\":\\\" obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:16:57.288816 6474 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI0129 13:16:57.288650 6474 obj_retry.go:365] Adding new object: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c\\\\nI0129 13:16:57.288805 6474 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0129 13:16:57.288830 6474 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling 
w\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:24Z\\\",\\\"message\\\":\\\"29 13:17:24.055315 6862 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-q79sn\\\\nI0129 13:17:24.055321 6862 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nI0129 13:17:24.055324 6862 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-q79sn in node crc\\\\nI0129 13:17:24.055329 6862 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-zdbwv in node crc\\\\nI0129 13:17:24.055331 6862 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-q79sn after 0 failed attempt(s)\\\\nI0129 13:17:24.055334 6862 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:17:24.055340 6862 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-q79sn\\\\nI0129 13:17:24.055280 6862 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI0129 13:17:24.055343 6862 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-zdbwv\\\\nI0129 13:17:24.055360 6862 obj_retry.go:303] Retry object setup: *v1.Pod openshift-image-registry/node-ca-z5mvv\\\\nI0129 
13:17:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:17:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:24Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.949130 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.949191 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.949624 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.949657 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.949700 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:24Z","lastTransitionTime":"2026-01-29T13:17:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
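The node keeps flipping to NotReady with the same KubeletNotReady condition because the runtime's network-readiness check finds no CNI network config. A stdlib-only sketch of that check, assuming the confDir named in the message and the config extensions libcni conventionally loads:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether confDir holds at least one CNI network
// config file; the runtime reports NetworkReady=false until one appears.
func cniConfigPresent(confDir string) (bool, error) {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni loads by convention
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		fmt.Println("container runtime network not ready: NetworkReady=false")
		return
	}
	fmt.Println("NetworkReady=true")
}

Here the config is expected to reappear once the crash-looping ovnkube-controller below comes back up and rewrites it.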
Has your network provider started?"} Jan 29 13:17:24 crc kubenswrapper[4787]: I0129 13:17:24.988535 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 06:02:50.408774445 +0000 UTC Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.004827 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.058008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.058061 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.058075 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.058100 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.058112 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.160940 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.160986 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.160998 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.161014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.161025 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
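The certificate_manager.go entry above records a rotation deadline months before expiry. A sketch of the jittered-deadline policy as commonly documented for client-go's certificate manager, which picks a random point in roughly the 70-90% span of the certificate's lifetime; the NotBefore below is an assumed issue date, since only the expiration appears in the log:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline returns a renewal time jittered into [70%, 90%) of the
// certificate's total lifetime, spreading renewals across a fleet.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC) // assumed issue date
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)  // expiration from the log
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}

A deadline of 2025-11-08 against a 2026-02-24 expiry is consistent with that window.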
Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.264509 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.264590 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.264608 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.264640 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.264837 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.368972 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.369015 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.369024 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.369041 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.369050 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.472298 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.472401 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.472422 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.472451 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.472506 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
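The "back-off 40s restarting failed container" entry just below follows the kubelet's standard crash-loop backoff: roughly a 10s initial delay that doubles with each restart, capped at 5m. A worked sketch under those assumptions; restartCount=3 for ovnkube-controller lines up with the 40s figure (10s -> 20s -> 40s):

package main

import (
	"fmt"
	"time"
)

// crashLoopDelay returns the assumed restart delay after a given number of
// consecutive failures: 10s, doubling per restart, capped at 5 minutes.
func crashLoopDelay(restarts int) time.Duration {
	const (
		initial  = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	if restarts < 1 {
		return 0
	}
	d := initial
	for i := 1; i < restarts; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for r := 1; r <= 7; r++ {
		fmt.Printf("restartCount=%d -> back-off %s\n", r, crashLoopDelay(r))
	}
}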
Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.576045 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.576131 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.576156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.576193 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.576221 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.615399 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/3.log" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.621290 4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf" Jan 29 13:17:25 crc kubenswrapper[4787]: E0129 13:17:25.621608 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.661973 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.680038 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.680104 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.680126 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.680158 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.680181 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.686578 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.710545 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.747967 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:24Z\\\",\\\"message\\\":\\\"29 13:17:24.055315 6862 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-q79sn\\\\nI0129 13:17:24.055321 6862 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nI0129 13:17:24.055324 6862 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-q79sn in node crc\\\\nI0129 13:17:24.055329 6862 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-zdbwv in node crc\\\\nI0129 13:17:24.055331 6862 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-q79sn after 0 failed attempt(s)\\\\nI0129 13:17:24.055334 6862 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:17:24.055340 6862 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-q79sn\\\\nI0129 13:17:24.055280 6862 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI0129 13:17:24.055343 6862 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-zdbwv\\\\nI0129 13:17:24.055360 6862 obj_retry.go:303] Retry object setup: *v1.Pod openshift-image-registry/node-ca-z5mvv\\\\nI0129 13:17:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:17:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.766243 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.783760 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.783827 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.783843 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.783874 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.783893 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.791542 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.812024 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.830525 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"
192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.859513 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ent
rypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2d
e0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"
2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.875608 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.886768 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.886831 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.886844 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.886864 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.886876 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.896243 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"2026-01-29T13:16:28+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164\\\\n2026-01-29T13:16:28+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164 to /host/opt/cni/bin/\\\\n2026-01-29T13:16:29Z [verbose] multus-daemon started\\\\n2026-01-29T13:16:29Z [verbose] Readiness Indicator file check\\\\n2026-01-29T13:17:14Z 
[error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.922099 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a1f38a-fab8-40f7-a830-69f98b248108\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68163faf875dbb372c1d558714922a4e3dc848c98e7f4214368b8119a60ea5a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://002e0c00e95756993be14fdbd21f4bfabba5b0a668683566abb40354bea15d76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cd656fcebf1bc45affce940113619ab798b3df1dc867fd90f882fe30ae592d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1c2ed7638a5c14f9ae0c4d1a4e23d03c9d506a
975929e8a463cae889015cae4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20be1b53cfca4976347c3f58061bbbb5b672d728c834ef2998f86fc6f84a4e5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7c5748b3f9097a07242699322e1cbfa803f58fd2b0ad22dae84f2e1c600739\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7c5748b3f9097a07242699322e1cbfa803f58fd2b0ad22dae84f2e1c600739\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c6bfc28c5c1f32f3b3644aacba4073d26654e13a60e57db776d7190c747bc78c\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6bfc28c5c1f32f3b3644aacba4073d26654e13a60e57db776d7190c747bc78c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.938646 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7
c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.954774 4787 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.978151 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.985772 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.985836 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.985882 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.986066 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:25 crc kubenswrapper[4787]: E0129 13:17:25.986260 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:25 crc kubenswrapper[4787]: E0129 13:17:25.986528 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:25 crc kubenswrapper[4787]: E0129 13:17:25.986703 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:25 crc kubenswrapper[4787]: E0129 13:17:25.986872 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.988816 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 10:20:50.241406079 +0000 UTC Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.989513 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.989556 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.989575 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.989601 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:25 crc kubenswrapper[4787]: I0129 13:17:25.989619 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:25Z","lastTransitionTime":"2026-01-29T13:17:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.000504 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:25Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.019309 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.039732 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.099544 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.099584 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.099594 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.099609 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.099620 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.204218 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.204320 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.204333 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.204353 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.204363 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.307299 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.307375 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.307394 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.307423 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.307443 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.410551 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.410629 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.410647 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.410675 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.410693 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.442691 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.442742 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.442752 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.442770 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.442782 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: E0129 13:17:26.462298 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.467021 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.467082 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.467101 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.467129 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.467147 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: E0129 13:17:26.482782 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.486353 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.486399 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.486413 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.486432 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.486445 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: E0129 13:17:26.500071 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.504313 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.504352 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.504365 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.504385 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.504401 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: E0129 13:17:26.519320 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.523368 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.523448 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.523478 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.523499 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.523512 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: E0129 13:17:26.537333 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:26Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:26 crc kubenswrapper[4787]: E0129 13:17:26.537475 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.539139 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.539174 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.539187 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.539207 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.539218 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.641772 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.641848 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.641870 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.641899 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.641918 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.745361 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.745403 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.745412 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.745428 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.745439 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.847691 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.847753 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.847771 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.847797 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.847817 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.951116 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.951195 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.951218 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.951250 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.951275 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:26Z","lastTransitionTime":"2026-01-29T13:17:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:26 crc kubenswrapper[4787]: I0129 13:17:26.989659 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 14:32:23.010645621 +0000 UTC Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.003151 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.055291 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.055365 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.055391 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.055422 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.055474 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.158867 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.159238 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.159397 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.159632 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.159796 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.263291 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.263706 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.263864 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.264046 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.264271 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.368102 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.368190 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.368217 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.368251 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.368276 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.472000 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.472082 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.472102 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.472131 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.472152 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.575991 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.576072 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.576100 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.576134 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.576159 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.681734 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.681805 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.681829 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.681860 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.681879 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.785822 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.785897 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.785916 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.785947 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.785966 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.825099 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.825288 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.825354 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.825422 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.825599 4787 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.825603 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.825546239 +0000 UTC m=+150.586806545 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.825777 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.825759305 +0000 UTC m=+150.587019611 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.825827 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.825902 4787 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.825966 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.825937901 +0000 UTC m=+150.587198207 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.826018 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.826072 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.826101 4787 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.826221 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.826180638 +0000 UTC m=+150.587440954 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.826540 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.826660 4787 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.826742 4787 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.826907 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.826876938 +0000 UTC m=+150.588137224 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.889583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.889668 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.889693 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.889733 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.889757 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.985817 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.985915 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.985951 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.986100 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.986257 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.986314 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.986418 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:27 crc kubenswrapper[4787]: E0129 13:17:27.986608 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.990206 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 05:23:01.150941315 +0000 UTC Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.994282 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.994340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.994361 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.994385 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:27 crc kubenswrapper[4787]: I0129 13:17:27.994405 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:27Z","lastTransitionTime":"2026-01-29T13:17:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.099390 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.099582 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.099603 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.099692 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.099718 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.203312 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.203367 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.203385 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.203411 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.203431 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.307509 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.307599 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.307643 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.307681 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.307707 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.411362 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.411415 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.411427 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.411446 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.411481 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.514372 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.514447 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.514486 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.514511 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.514531 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.617734 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.617780 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.617791 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.617808 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.617819 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.720859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.720931 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.720959 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.720995 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.721021 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.823431 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.823524 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.823544 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.823572 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.823594 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.927060 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.927143 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.927179 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.927239 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.927269 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:28Z","lastTransitionTime":"2026-01-29T13:17:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:28 crc kubenswrapper[4787]: I0129 13:17:28.990800 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 11:51:03.242906448 +0000 UTC Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.031077 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.031156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.031182 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.031220 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.031249 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.134446 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.134544 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.134563 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.134589 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.134607 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.237512 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.237582 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.237604 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.237635 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.237657 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.341012 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.341091 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.341112 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.341145 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.341172 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.444507 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.444599 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.444626 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.444664 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.444688 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.548478 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.548535 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.548547 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.548570 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.548585 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.652332 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.652405 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.652424 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.652797 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.652959 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.756164 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.756259 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.756289 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.756322 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.756344 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.859598 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.859669 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.859687 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.859714 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.859733 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.962922 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.962988 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.963009 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.963044 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.963134 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:29Z","lastTransitionTime":"2026-01-29T13:17:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.984731 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.984961 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.984875 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.985054 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:29 crc kubenswrapper[4787]: E0129 13:17:29.985143 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:29 crc kubenswrapper[4787]: E0129 13:17:29.985292 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:29 crc kubenswrapper[4787]: E0129 13:17:29.985385 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:29 crc kubenswrapper[4787]: E0129 13:17:29.985440 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:29 crc kubenswrapper[4787]: I0129 13:17:29.995072 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 04:57:35.672113854 +0000 UTC Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.067178 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.067241 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.067251 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.067274 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.067286 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.171203 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.171291 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.171315 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.171350 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.171373 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.281103 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.281159 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.281176 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.281206 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.281225 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.384509 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.384571 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.384583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.384602 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.384614 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.488243 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.488307 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.488327 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.488353 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.488375 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.602577 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.602662 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.602686 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.602720 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.602743 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.706134 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.706202 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.706219 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.706246 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.706266 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.809649 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.809747 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.809775 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.809813 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.809839 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.913842 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.913910 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.913930 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.913960 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.913981 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:30Z","lastTransitionTime":"2026-01-29T13:17:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:30 crc kubenswrapper[4787]: I0129 13:17:30.995899 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 03:05:45.295923081 +0000 UTC Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.018443 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.018555 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.018586 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.018660 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.018688 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.122765 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.122857 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.122882 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.122926 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.122973 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.226186 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.226258 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.226277 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.226300 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.226319 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.329756 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.329822 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.329839 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.329868 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.329888 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.433580 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.433714 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.433787 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.433912 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.434001 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.537578 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.537644 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.537664 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.537693 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.537716 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.640777 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.640850 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.640870 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.640895 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.640915 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.744104 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.744495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.744615 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.744713 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.744798 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.849129 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.849180 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.849190 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.849211 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.849223 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.952859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.952930 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.952949 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.952979 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.953001 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:31Z","lastTransitionTime":"2026-01-29T13:17:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.984819 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.984974 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.984884 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.984917 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:31 crc kubenswrapper[4787]: E0129 13:17:31.985117 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:31 crc kubenswrapper[4787]: E0129 13:17:31.985258 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:31 crc kubenswrapper[4787]: E0129 13:17:31.985440 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:31 crc kubenswrapper[4787]: E0129 13:17:31.985660 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:31 crc kubenswrapper[4787]: I0129 13:17:31.997008 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 07:36:05.652649937 +0000 UTC
Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.008386 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.037972 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.056249 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.056685 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.056933 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.057151 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.057369 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.060248 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.089394 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:24Z\\\",\\\"message\\\":\\\"29 13:17:24.055315 6862 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-q79sn\\\\nI0129 13:17:24.055321 6862 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nI0129 13:17:24.055324 6862 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-q79sn in node crc\\\\nI0129 13:17:24.055329 6862 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-zdbwv in node crc\\\\nI0129 13:17:24.055331 6862 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-q79sn after 0 failed attempt(s)\\\\nI0129 13:17:24.055334 6862 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:17:24.055340 6862 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-q79sn\\\\nI0129 13:17:24.055280 6862 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI0129 13:17:24.055343 6862 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-zdbwv\\\\nI0129 13:17:24.055360 6862 obj_retry.go:303] Retry object setup: *v1.Pod openshift-image-registry/node-ca-z5mvv\\\\nI0129 13:17:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:17:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.104492 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.117722 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.133749 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd9e8b03-40be-41fb-9482-210e3a773e83\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://254f1230999b735f7a053b9cc896ac55f3da9c272f825f4b6b7bf2966a147dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://180ad932e889b4e0870b064a0bf0ce373b3f0928a9c4575d4cda14e75d0d949e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://180ad932e889b4e0870b064a0bf0ce373b3f0928a9c4575d4cda14e75d0d949e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.148779 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.160157 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.160228 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.160250 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.160281 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.160302 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.163646 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.177171 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.191114 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.223259 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a1f38a-fab8-40f7-a830-69f98b248108\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68163faf875dbb372c1d558714922a4e3dc848c98e7f4214368b8119a60ea5a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://002e0c00e95756993be14fdbd21f4bfabba5b0a668683566abb40354bea15d76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cd656fcebf1bc45affce940113619ab798b3df1dc867fd90f882fe30ae592d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1c2ed7638a5c14f9ae0c4d1a4e23d03c9d506a
975929e8a463cae889015cae4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20be1b53cfca4976347c3f58061bbbb5b672d728c834ef2998f86fc6f84a4e5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7c5748b3f9097a07242699322e1cbfa803f58fd2b0ad22dae84f2e1c600739\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7c5748b3f9097a07242699322e1cbfa803f58fd2b0ad22dae84f2e1c600739\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c6bfc28c5c1f32f3b3644aacba4073d26654e13a60e57db776d7190c747bc78c\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6bfc28c5c1f32f3b3644aacba4073d26654e13a60e57db776d7190c747bc78c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.238531 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7
c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.251680 4787 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.264568 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.264961 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.265000 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.265010 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.265027 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.265038 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.280766 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.293287 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.303713 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.322868 4787 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"2026-01-29T13:16:28+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164\\\\n2026-01-29T13:16:28+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164 to /host/opt/cni/bin/\\\\n2026-01-29T13:16:29Z [verbose] multus-daemon started\\\\n2026-01-29T13:16:29Z [verbose] Readiness Indicator file check\\\\n2026-01-29T13:17:14Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:32Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.368388 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.368435 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.368473 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.368501 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.368515 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.472337 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.472400 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.472417 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.472443 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.472502 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.575583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.575671 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.575696 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.575732 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.575764 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.678896 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.678987 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.679014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.679058 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.679084 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.782489 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.782550 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.782568 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.782593 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.782608 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.886288 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.886384 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.886408 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.886444 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.886510 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.990318 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.990403 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.990430 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.990502 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.990528 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:32Z","lastTransitionTime":"2026-01-29T13:17:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:32 crc kubenswrapper[4787]: I0129 13:17:32.997796 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 06:40:03.927929778 +0000 UTC Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.093877 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.093933 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.093952 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.093979 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.094000 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.197217 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.197291 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.197309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.197345 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.197365 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.301146 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.301229 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.301248 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.301284 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.301305 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.405028 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.405093 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.405105 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.405127 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.405143 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.509057 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.509123 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.509143 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.509170 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.509187 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.647940 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.648010 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.648027 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.648056 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.648078 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.752207 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.752303 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.752337 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.752374 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.752396 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.858194 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.858278 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.858332 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.858378 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.858407 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.961961 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.962032 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.962050 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.962080 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.962100 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:33Z","lastTransitionTime":"2026-01-29T13:17:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.985687 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.985761 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.985697 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:33 crc kubenswrapper[4787]: E0129 13:17:33.985862 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:33 crc kubenswrapper[4787]: E0129 13:17:33.986044 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.986245 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:33 crc kubenswrapper[4787]: E0129 13:17:33.986302 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:33 crc kubenswrapper[4787]: E0129 13:17:33.986364 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:33 crc kubenswrapper[4787]: I0129 13:17:33.997958 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 16:14:03.453609565 +0000 UTC Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.065617 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.065690 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.065709 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.065737 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.065760 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.169977 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.170074 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.170099 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.170133 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.170158 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.273948 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.273999 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.274008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.274031 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.274042 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.377983 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.378043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.378086 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.378113 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.378129 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.481527 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.481621 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.481643 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.481677 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.481698 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.586014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.586080 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.586091 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.586112 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.586124 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.688389 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.688434 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.688444 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.688486 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.688502 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.791497 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.791552 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.791565 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.791589 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.791604 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.894515 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.894594 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.894612 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.894639 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.894657 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.998165 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 14:07:21.014791666 +0000 UTC Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.998746 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.998789 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.998802 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.998820 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:34 crc kubenswrapper[4787]: I0129 13:17:34.998834 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:34Z","lastTransitionTime":"2026-01-29T13:17:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.102340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.102397 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.102415 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.102441 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.102501 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.207117 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.207166 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.207185 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.207211 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.207229 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.310035 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.310085 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.310100 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.310122 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.310136 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.413841 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.413915 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.413937 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.413969 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.413990 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.517214 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.517304 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.517331 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.517365 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.517404 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.621048 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.621112 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.621124 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.621147 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.621161 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.725008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.725112 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.725126 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.725153 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.725168 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.829372 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.829888 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.829907 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.829935 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.829952 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.933208 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.933274 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.933293 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.933325 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.933346 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:35Z","lastTransitionTime":"2026-01-29T13:17:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.985486 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.985447 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:35 crc kubenswrapper[4787]: E0129 13:17:35.985672 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:35 crc kubenswrapper[4787]: E0129 13:17:35.985975 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.985426 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.986026 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:35 crc kubenswrapper[4787]: E0129 13:17:35.986346 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:35 crc kubenswrapper[4787]: E0129 13:17:35.986436 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:35 crc kubenswrapper[4787]: I0129 13:17:35.998806 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 12:37:30.999099344 +0000 UTC Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.035921 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.036030 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.036054 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.036099 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.036129 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.139336 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.139428 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.139494 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.139529 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.139550 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.242301 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.242394 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.242409 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.242437 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.242481 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.347177 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.347264 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.347282 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.347315 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.347361 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.450736 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.451167 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.451270 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.451385 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.451554 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.554859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.554992 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.555011 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.555032 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.555046 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.660014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.660052 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.660067 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.660098 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.660111 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.763814 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.763886 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.763942 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.763971 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.764004 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.765769 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.765847 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.765867 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.765899 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.765922 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: E0129 13:17:36.782618 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:36Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.787800 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.787878 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.787935 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.787966 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.788008 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:36 crc kubenswrapper[4787]: E0129 13:17:36.802860 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:36Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.807870 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.807975 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.807989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.808014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.808032 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:36 crc kubenswrapper[4787]: E0129 13:17:36.822193 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:36Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.827195 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.827249 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.827263 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.827282 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.827292 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:36 crc kubenswrapper[4787]: E0129 13:17:36.841933 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:36Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.847304 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.847351 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.847362 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.847376 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.847388 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:36 crc kubenswrapper[4787]: E0129 13:17:36.861858 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:36Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:36Z is after 2025-08-24T17:21:41Z"
Jan 29 13:17:36 crc kubenswrapper[4787]: E0129 13:17:36.862078 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.866596 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.866676 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.866692 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.866743 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.866762 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.969295 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.969351 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.969363 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.969378 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.969388 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:36Z","lastTransitionTime":"2026-01-29T13:17:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:36 crc kubenswrapper[4787]: I0129 13:17:36.985777 4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"
Jan 29 13:17:36 crc kubenswrapper[4787]: E0129 13:17:36.986024 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.000280 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 08:40:27.028117866 +0000 UTC
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.073312 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.073398 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.073422 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.073486 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.073511 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.176989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.177059 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.177075 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.177102 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.177121 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.281334 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.281427 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.281496 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.281538 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.281567 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.385942 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.386024 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.386043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.386071 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.386091 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.489893 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.489965 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.489985 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.490014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.490037 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.593429 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.593575 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.593597 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.593621 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.593638 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.696058 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.696107 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.696119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.696139 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.696153 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.800034 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.800099 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.800120 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.800150 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.800177 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.903280 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.903396 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.903424 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.903491 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.903519 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:37Z","lastTransitionTime":"2026-01-29T13:17:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.985303 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.985351 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:37 crc kubenswrapper[4787]: E0129 13:17:37.985532 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.985687 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:37 crc kubenswrapper[4787]: I0129 13:17:37.985773 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:37 crc kubenswrapper[4787]: E0129 13:17:37.985825 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:37 crc kubenswrapper[4787]: E0129 13:17:37.986035 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:37 crc kubenswrapper[4787]: E0129 13:17:37.986071 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.001418 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 01:23:20.022933477 +0000 UTC
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.006960 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.007012 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.007033 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.007057 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.007074 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.109842 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.109896 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.109911 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.109934 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.109952 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.212907 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.212956 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.212970 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.212991 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.213003 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.316271 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.316669 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.316790 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.316924 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.317045 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.420895 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.420946 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.420959 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.420981 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.420995 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.524596 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.524640 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.524650 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.524672 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.524687 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.627798 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.627867 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.627877 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.627895 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.627906 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.730905 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.731069 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.731097 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.731131 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.731157 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.834738 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.834793 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.834805 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.834826 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.834840 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.938953 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.939443 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.939637 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.939778 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:38 crc kubenswrapper[4787]: I0129 13:17:38.939898 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:38Z","lastTransitionTime":"2026-01-29T13:17:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.001600 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 00:49:42.509539812 +0000 UTC Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.043829 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.043965 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.043987 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.044018 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.044039 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.148531 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.148600 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.148619 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.148649 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.148673 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.252066 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.252795 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.252950 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.253434 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.253726 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.357250 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.357327 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.357347 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.357376 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.357397 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.461064 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.461130 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.461147 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.461175 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.461195 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.565116 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.565169 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.565183 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.565207 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.565223 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.669328 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.669409 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.669432 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.669498 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.669520 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.772777 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.773223 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.773326 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.773434 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.773888 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.878254 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.878337 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.878356 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.878386 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.878412 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.982773 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.982853 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.982873 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.982905 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.982930 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:39Z","lastTransitionTime":"2026-01-29T13:17:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.985386 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.985448 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.985523 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:39 crc kubenswrapper[4787]: E0129 13:17:39.985637 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:39 crc kubenswrapper[4787]: I0129 13:17:39.985657 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:39 crc kubenswrapper[4787]: E0129 13:17:39.985790 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:39 crc kubenswrapper[4787]: E0129 13:17:39.986126 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:39 crc kubenswrapper[4787]: E0129 13:17:39.986355 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.002247 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 03:53:45.253628098 +0000 UTC Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.086394 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.086508 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.086535 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.086569 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.086593 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.189891 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.189965 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.189984 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.190012 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.190031 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.293112 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.293164 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.293178 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.293206 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.293220 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.396897 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.396955 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.396982 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.397014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.397040 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.500913 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.501000 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.501022 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.501054 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.501076 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.605233 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.605310 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.605334 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.605367 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.605391 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.709170 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.709248 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.709274 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.709305 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.709327 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.812972 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.813049 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.813069 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.813099 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.813120 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.917199 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.917268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.917317 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.917358 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:40 crc kubenswrapper[4787]: I0129 13:17:40.917383 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:40Z","lastTransitionTime":"2026-01-29T13:17:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.003826 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 22:31:42.921025081 +0000 UTC Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.020397 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.020442 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.020498 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.020522 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.020541 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.124840 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.125096 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.125117 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.125147 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.125168 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.229196 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.229741 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.229964 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.230137 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.230285 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.333728 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.333853 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.333881 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.333914 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.333939 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.437910 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.438004 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.438030 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.438569 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.438841 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.542611 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.542712 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.542741 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.542775 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.542797 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.646210 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.646301 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.646328 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.646363 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.646389 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.750535 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.750613 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.750637 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.750678 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.750703 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.854403 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.854528 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.854552 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.854583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.854607 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.957870 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.957929 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.957949 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.957973 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.957993 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:41Z","lastTransitionTime":"2026-01-29T13:17:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.985400 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.985528 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:41 crc kubenswrapper[4787]: E0129 13:17:41.986765 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.986822 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:41 crc kubenswrapper[4787]: I0129 13:17:41.986912 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:41 crc kubenswrapper[4787]: E0129 13:17:41.987104 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:41 crc kubenswrapper[4787]: E0129 13:17:41.987724 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:41 crc kubenswrapper[4787]: E0129 13:17:41.987857 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.005858 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 00:51:09.344464695 +0000 UTC Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.022201 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a1f38a-fab8-40f7-a830-69f98b248108\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68163faf875dbb372c1d558714922a4e3dc848c98e7f4214368b8119a60ea5a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://002e0c00e95756993be14fdbd21f4bfabba5b0a668683566abb40354bea15d76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9cd656fcebf1bc45affce940113619ab798b3df1dc867fd90f882fe30ae592d8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f1c2ed7638a5c14f9ae0c4d1a4e23d03c9d506a
975929e8a463cae889015cae4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20be1b53cfca4976347c3f58061bbbb5b672d728c834ef2998f86fc6f84a4e5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6f15cabe9f6123aad9f2224c17ed053aeecd5dd74227859c4491549d6b8ece5b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df7c5748b3f9097a07242699322e1cbfa803f58fd2b0ad22dae84f2e1c600739\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://df7c5748b3f9097a07242699322e1cbfa803f58fd2b0ad22dae84f2e1c600739\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c6bfc28c5c1f32f3b3644aacba4073d26654e13a60e57db776d7190c747bc78c\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c6bfc28c5c1f32f3b3644aacba4073d26654e13a60e57db776d7190c747bc78c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.046029 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7
c05d4380f89c12d7a0e5137f8ea4a3a5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-29T13:16:24Z\\\",\\\"message\\\":\\\"le observer\\\\nW0129 13:16:23.934384 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0129 13:16:23.935420 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0129 13:16:23.936865 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3383700973/tls.crt::/tmp/serving-cert-3383700973/tls.key\\\\\\\"\\\\nI0129 13:16:24.154238 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0129 13:16:24.163093 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0129 13:16:24.163122 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0129 13:16:24.163147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0129 13:16:24.163152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0129 13:16:24.178825 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0129 13:16:24.178870 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178877 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0129 13:16:24.178883 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0129 13:16:24.178887 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0129 13:16:24.178890 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0129 13:16:24.178894 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0129 13:16:24.178971 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0129 13:16:24.181465 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.063333 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.063396 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.063413 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.063441 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.063491 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.066558 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.086660 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c7ea0517dc0e161442a7133d89629590b02ab4982213acf722df5dee8a6bb22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.108256 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.130974 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zdbwv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"63554095-2494-4e27-b2a7-d949955722fa\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c73a191a696ee0c6070af2b6a72137ffee1d72a411b3be5c83fbea59d882075f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9bdsk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zdbwv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.150071 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6311862b-6ca2-4dba-85e0-6829dd45c2db\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba75cb5ab55d0ac6f61ed3ff4106b5406aee4c0beeac95a0ccd9e8d98aef18ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqczt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-q79sn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.166401 4787 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.166486 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.166500 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.166524 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.166539 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.174542 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j6wn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2526766-68ea-4959-a656-b0c68c754890\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:14Z\\\",\\\"message\\\":\\\"2026-01-29T13:16:28+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164\\\\n2026-01-29T13:16:28+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_7265decb-7132-482b-b9c1-897d3e68e164 to /host/opt/cni/bin/\\\\n2026-01-29T13:16:29Z [verbose] multus-daemon started\\\\n2026-01-29T13:16:29Z [verbose] Readiness Indicator file check\\\\n2026-01-29T13:17:14Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:17:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gdgvk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j6wn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.192724 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ed38575e-abe5-42ad-b8a8-3791301aa9d3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f5b6b6f3527b762622396052f0b79782dbe6a1e3fbe50c7a2d8317ac202f5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://996478424b97dc322c6d0825e647af94ee5ea2c894f0657fddf91a4dd94e4f25\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a0133679114f510ddeec18774b4463fc771f6eeaf5f5c8c69119117f6accc66\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e603778a9e41bb26020946efc7c345c5f5b97b05307764a4e711a001280f109f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.207732 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.224299 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e3061118223b3e2a6b3e28a10d7444ae9f87773325cc84782467c85bd86ed8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://989b2459f35c5072d4a590682d4fbf73bc9a476e7a32b1531c89fee49804e1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.258120 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55309602-3b5c-4506-8cad-0c1609e2b1cb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5
bc5656880e1a12e0b96909cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-29T13:17:24Z\\\",\\\"message\\\":\\\"29 13:17:24.055315 6862 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/machine-config-daemon-q79sn\\\\nI0129 13:17:24.055321 6862 obj_retry.go:365] Adding new object: *v1.Pod openshift-dns/node-resolver-zdbwv\\\\nI0129 13:17:24.055324 6862 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-q79sn in node crc\\\\nI0129 13:17:24.055329 6862 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-zdbwv in node crc\\\\nI0129 13:17:24.055331 6862 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-q79sn after 0 failed attempt(s)\\\\nI0129 13:17:24.055334 6862 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-zdbwv after 0 failed attempt(s)\\\\nI0129 13:17:24.055340 6862 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-q79sn\\\\nI0129 13:17:24.055280 6862 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-source-55646444c4-trplf\\\\nI0129 13:17:24.055343 6862 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-zdbwv\\\\nI0129 13:17:24.055360 6862 obj_retry.go:303] Retry object setup: *v1.Pod openshift-image-registry/node-ca-z5mvv\\\\nI0129 13:17:2\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-29T13:17:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlqsj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:26Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-pq2mb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.271085 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.271137 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.271153 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.271177 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.271192 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.279143 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5mvv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a8e24ae3-482d-462d-8c7f-2dfa9223a866\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9ddf034dceee12a345b0307bbf0b41f5334169f5a093ef3d8683ebf065eb959f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qh69l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:27Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5mvv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.302369 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6fa58f8c-a100-4857-a166-30317c2db4ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b556b5964e688715a906723fd03a0d7c8140cc2840919b73511e5f12243ab51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0745d2acef9fac35989212819a909e38a60de122a83cb633adff3095180def2d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92a08720b772a2c4550aaf2621f1adc7a63bbdce576daf6d0f9b3cb9495c2a3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.320105 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dd9e8b03-40be-41fb-9482-210e3a773e83\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://254f1230999b735f7a053b9cc896ac55f3da9c272f825f4b6b7bf2966a147dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://180ad932e889b4e0870b064a0bf0ce373b3f0928a9c4575d4cda14e75d0d949e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318
bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://180ad932e889b4e0870b064a0bf0ce373b3f0928a9c4575d4cda14e75d0d949e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:02Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.344542 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:27Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94fcabddad8c61ff562720c36c0708870cecc3a12ee9eff5d77cde142d70c76f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 
crc kubenswrapper[4787]: I0129 13:17:42.363344 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7c3c3cd-3c8a-4dea-b37b-cff3137613ab\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d9797120fa18f44996304b2fa44382da50aac2405e6fd74f62b30ce2a6378c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9847bb510315461e9b7327eff9b6545ff3592ccf3815334eacf2d7826941df9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6g848\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-vqw5c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: 
Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.374816 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.374896 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.374918 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.374952 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.374972 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.388770 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a11db361-58df-40d6-ba72-c59df0ed819c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a0a1baab933df2aab50ae96d4bc223ecd7ce4d99fa0f7750b905d3f47c01c685\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:16:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891
ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e06e4a1828c2d251758ef891ab8ba6260ef20f2c0349c99c25893fc89521c2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00894d3216e6cc463c5214214a034419d0cd6f3542d195d66ff890d5bd3960e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74c1ed450faccec8b7898e35446d53c7e1300dc55de2c70bd3683dd471d65c32\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoin
t\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://210ad6b1d5607ea6ed50a910f83e5c0fbbd29867748951d6ee8b83533167c201\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1b1e883d0527b81c016332d6fde9e12600e8525ab774f924f71c5e0718ad041b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":fal
se,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://41a21f22b61ff6b07d63512695c8382fc222b6f37c8400328b95c77bfa49305e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-29T13:16:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-29T13:16:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-46wtj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:25Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fdf9c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.404547 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-29T13:16:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j5dq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-29T13:16:40Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gkrsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:42Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.478973 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.479037 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.479055 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.479083 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.479106 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.582206 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.582278 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.582301 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.582341 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.582369 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.685491 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.685569 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.685587 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.685616 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.685639 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.789490 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.789561 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.789586 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.789622 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.789645 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.892787 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.892864 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.892885 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.892914 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.892933 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.996151 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.996218 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.996227 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.996262 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:42 crc kubenswrapper[4787]: I0129 13:17:42.996275 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:42Z","lastTransitionTime":"2026-01-29T13:17:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.006634 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 22:35:34.48530443 +0000 UTC Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.099220 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.099303 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.099324 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.099359 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.099381 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.202521 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.202574 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.202588 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.202612 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.202634 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.306109 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.306192 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.306216 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.306252 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.306281 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.409708 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.409772 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.409783 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.409808 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.409822 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.512780 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.513310 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.513495 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.513668 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.513802 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.617413 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.617499 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.617523 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.617560 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.617586 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.720646 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.720707 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.720721 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.720747 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.720763 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.823363 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.823413 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.823423 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.823443 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.823472 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.925955 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.925996 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.926007 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.926025 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.926035 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:43Z","lastTransitionTime":"2026-01-29T13:17:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.985570 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.985643 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:43 crc kubenswrapper[4787]: E0129 13:17:43.985757 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.985800 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:43 crc kubenswrapper[4787]: I0129 13:17:43.985571 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:43 crc kubenswrapper[4787]: E0129 13:17:43.985931 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:43 crc kubenswrapper[4787]: E0129 13:17:43.986006 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:43 crc kubenswrapper[4787]: E0129 13:17:43.986135 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.006919 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 06:41:49.604241171 +0000 UTC Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.029329 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.029394 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.029416 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.029443 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.029491 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.133068 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.133151 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.133171 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.133199 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.133225 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.233716 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:44 crc kubenswrapper[4787]: E0129 13:17:44.234253 4787 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:17:44 crc kubenswrapper[4787]: E0129 13:17:44.234671 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs podName:0fcadf59-74fc-4aeb-abd6-55f6061fa5b0 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:48.234618613 +0000 UTC m=+166.995879009 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs") pod "network-metrics-daemon-gkrsx" (UID: "0fcadf59-74fc-4aeb-abd6-55f6061fa5b0") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.236842 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.236915 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.236935 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.236964 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.236985 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.340737 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.340812 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.340831 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.340859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.340880 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.443919 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.443974 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.443991 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.444015 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.444029 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.546919 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.546989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.547002 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.547027 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.547043 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.650751 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.651156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.651412 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.651681 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.651860 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.755368 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.755414 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.755428 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.755473 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.755489 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.858965 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.859013 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.859026 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.859049 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.859070 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.962299 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.962349 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.962365 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.962393 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:44 crc kubenswrapper[4787]: I0129 13:17:44.962412 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:44Z","lastTransitionTime":"2026-01-29T13:17:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.007590 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 11:10:14.557047391 +0000 UTC Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.065295 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.065360 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.065379 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.065408 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.065429 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.168497 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.168538 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.168548 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.168565 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.168576 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.272097 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.272142 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.272155 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.272175 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.272189 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.375290 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.375352 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.375372 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.375393 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.375409 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.478942 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.479006 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.479026 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.479056 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.479076 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.582513 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.582586 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.582603 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.582630 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.582652 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.685309 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.685381 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.685395 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.685420 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.685439 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.788220 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.788287 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.788305 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.788331 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.788351 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.892113 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.892193 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.892212 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.892244 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.892263 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.985497 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.985648 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.985647 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:45 crc kubenswrapper[4787]: E0129 13:17:45.985836 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.985861 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:45 crc kubenswrapper[4787]: E0129 13:17:45.986024 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:45 crc kubenswrapper[4787]: E0129 13:17:45.986206 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:45 crc kubenswrapper[4787]: E0129 13:17:45.986276 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.995498 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.995648 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.995724 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.995753 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:45 crc kubenswrapper[4787]: I0129 13:17:45.995803 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:45Z","lastTransitionTime":"2026-01-29T13:17:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.008529 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 11:48:28.26468857 +0000 UTC Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.098534 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.098947 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.099096 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.099231 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.099350 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.202071 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.202148 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.202173 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.202208 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.202230 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.305419 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.305545 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.305574 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.305607 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.305630 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.410249 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.410311 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.410328 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.410353 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.410371 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.512817 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.512861 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.512877 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.512898 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.512914 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.616312 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.616370 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.616385 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.616408 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.616424 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.719978 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.720034 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.720052 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.720078 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.720098 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.823838 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.823918 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.823935 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.823964 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.823986 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.927325 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.927379 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.927389 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.927409 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:46 crc kubenswrapper[4787]: I0129 13:17:46.927419 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:46Z","lastTransitionTime":"2026-01-29T13:17:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.008918 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 02:52:55.269409916 +0000 UTC Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.030572 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.030636 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.030663 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.030691 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.030709 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.096904 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.096968 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.096981 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.097005 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.097217 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.115293 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:47Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.121035 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.121088 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
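The status patch is rejected because the webhook's serving certificate expired on 2025-08-24 while the node clock reads 2026-01-29. A small Go sketch of the same validity-window check the TLS handshake performs; the PEM file path is hypothetical:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path to the webhook's serving certificate.
	data, err := os.ReadFile("/tmp/webhook-serving.crt")
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Println("no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	now := time.Now()
	switch {
	case now.After(cert.NotAfter):
		// Same condition the handshake reported: current time is after NotAfter.
		fmt.Printf("certificate expired: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Println("certificate not yet valid")
	default:
		fmt.Println("certificate currently valid until", cert.NotAfter)
	}
}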
event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.121097 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.121116 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.121127 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.141519 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:47Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.147334 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.147396 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
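Every failed status patch in this run shares one root cause, visible at the tail of each error: the serving certificate of the webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, and the node clock reads 2026-01-29, so Go's TLS client rejects the handshake. The test that fires is the plain NotBefore/NotAfter window check on the parsed certificate. Below is a minimal Go sketch of that same check; the PEM path is a placeholder for illustration, not the actual secret that network-node-identity serves from.

// certwindow.go: reproduce the x509 validity-window check that makes the
// webhook POST above fail with "certificate has expired or is not yet valid".
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Placeholder path; substitute whatever certificate you want to inspect.
	pemBytes, err := os.ReadFile("/path/to/webhook-serving-cert.pem")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now().UTC()
	switch {
	case now.After(cert.NotAfter):
		// The condition the log reports: current time is after NotAfter.
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate is not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Println("certificate is within its validity window")
	}
}

Until that certificate is rotated (or the node clock corrected), every node-status patch fails the same way, which is exactly the loop recorded in the following entries.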
event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.147407 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.147426 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.147469 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.187265 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:47Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.196242 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.196319 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
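Independently of the webhook failure, every heartbeat in this window reports Ready=False because the container runtime reports NetworkReady=false: nothing has written a CNI configuration under /etc/kubernetes/cni/net.d/ yet. The sketch below shows the kind of directory scan behind that message; it illustrates how a runtime decides whether any CNI network is configured (the conventional libcni extensions), not CRI-O's actual code.

// cniready.go: approximate the "is any CNI network configured?" check that
// drives the NetworkReady=false condition logged above. Illustrative only.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
	var found []string
	// .conf, .conflist and .json are the extensions libcni-style loaders accept.
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err != nil {
			panic(err) // Glob only errors on a malformed pattern
		}
		found = append(found, matches...)
	}
	if len(found) == 0 {
		// An empty result is what kubelet ultimately surfaces as
		// "no CNI configuration file in /etc/kubernetes/cni/net.d/".
		fmt.Println("no CNI configuration file found. Has your network provider started?")
		return
	}
	fmt.Println("CNI configurations:", found)
}

The condition clears once the network plugin writes its configuration into that directory; until then kubelet keeps stamping the NotReady heartbeat seen in every cycle below.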
event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.196333 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.196354 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.196387 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.226271 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:47Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.232006 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.232064 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.232077 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.232099 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.232110 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.249418 4787 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-29T13:17:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"b6cb75cf-71fc-45e9-a32b-9486bc86c1ea\\\",\\\"systemUUID\\\":\\\"1406f0c8-950e-4271-841a-c6aa782191ee\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-29T13:17:47Z is after 2025-08-24T17:21:41Z" Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.249657 4787 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.251755 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
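The five failed patches in this burst, capped by "update node status exceeds retry count" and then a fresh cycle, show the bounded-retry shape of kubelet's status sync: a fixed number of patch attempts per sync period, then give up until the next period. A minimal Go sketch of that control flow follows, assuming five attempts to match this log; it is illustrative, not kubelet's actual tryUpdateNodeStatus code.

// retryshape.go: the bounded retry that ends in
// "update node status exceeds retry count".
package main

import (
	"errors"
	"fmt"
)

// Number of patch attempts per sync period. Five matches the failed
// attempts in this burst; treat the value as illustrative.
const nodeStatusUpdateRetry = 5

// patchNodeStatus stands in for the PATCH that the admission webhook
// rejects above; it fails unconditionally, like every attempt in the log.
func patchNodeStatus() error {
	return errors.New("Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\"")
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patchNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		// Mirrors the final E-line above before the cycle starts over.
		fmt.Println("Unable to update node status:", err)
	}
}

Because the webhook rejects every attempt for the same reason, each sync period exhausts its retries and the pattern repeats until the certificate problem is fixed.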
event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.251819 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.251835 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.251864 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.251880 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.355781 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.355842 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.355857 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.355885 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.355906 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.460398 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.460480 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.460491 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.460513 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.460525 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.563635 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.563698 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.563710 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.563734 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.563747 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.667575 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.667640 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.667659 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.667688 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.667706 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.770695 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.770762 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.770770 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.770785 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.770798 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.873203 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.873239 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.873251 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.873266 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.873276 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.976418 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.976477 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.976489 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.976505 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.976514 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:47Z","lastTransitionTime":"2026-01-29T13:17:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.985090 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.985385 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.985527 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.985090 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:47 crc kubenswrapper[4787]: I0129 13:17:47.985581 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.985655 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.985755 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:47 crc kubenswrapper[4787]: E0129 13:17:47.986001 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.010009 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 15:46:07.342828898 +0000 UTC
Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.080163 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.080239 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.080257 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.080286 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.080310 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.183371 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.183421 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.183438 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.183485 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.183503 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.285995 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.286045 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.286058 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.286078 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.286091 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.389080 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.389154 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.389169 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.389193 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.389207 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.492608 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.492665 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.492678 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.492701 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.492718 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.595557 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.595634 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.595652 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.595687 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.595709 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.698588 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.698651 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.698667 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.698690 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.698708 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.802143 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.802260 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.802281 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.802313 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.802332 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.905340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.905401 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.905414 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.905437 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.905470 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:48Z","lastTransitionTime":"2026-01-29T13:17:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:48 crc kubenswrapper[4787]: I0129 13:17:48.986173 4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"
Jan 29 13:17:48 crc kubenswrapper[4787]: E0129 13:17:48.986479 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.008257 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.008303 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.008315 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.008334 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.008349 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.011650 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 11:37:41.255742035 +0000 UTC
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.112046 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.112107 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.112119 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.112143 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.112162 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.215288 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.215342 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.215355 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.215377 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.215392 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.318248 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.318313 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.318326 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.318349 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.318366 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.421349 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.421395 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.421405 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.421419 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.421429 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.523897 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.523930 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.523939 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.523955 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.523964 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.628021 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.628074 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.628087 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.628120 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.628136 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.730872 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.731211 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.731313 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.731487 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.731573 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.834963 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.835003 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.835013 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.835030 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.835039 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.938693 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.939029 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.939106 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.939170 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.939240 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:49Z","lastTransitionTime":"2026-01-29T13:17:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.985437 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.985437 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.985637 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:49 crc kubenswrapper[4787]: I0129 13:17:49.985698 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:49 crc kubenswrapper[4787]: E0129 13:17:49.985879 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:49 crc kubenswrapper[4787]: E0129 13:17:49.986244 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:49 crc kubenswrapper[4787]: E0129 13:17:49.986392 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:49 crc kubenswrapper[4787]: E0129 13:17:49.986532 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.012269 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 19:05:15.353042758 +0000 UTC
Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.042058 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.042145 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.042158 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.042186 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.042204 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.145390 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.145448 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.145475 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.145499 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.145516 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.248715 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.248774 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.248786 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.248813 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.248826 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.352776 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.353297 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.353388 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.353531 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.353618 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.458289 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.458371 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.458400 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.458422 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.458438 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.561278 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.561349 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.561367 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.561428 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.561479 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.664649 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.664708 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.664722 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.664745 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.664764 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.768008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.768064 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.768077 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.768100 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.768115 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.871806 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.872011 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.872055 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.872148 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.872222 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.975492 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.975565 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.975583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.975603 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:50 crc kubenswrapper[4787]: I0129 13:17:50.975617 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:50Z","lastTransitionTime":"2026-01-29T13:17:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.013068 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 06:32:47.020762218 +0000 UTC Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.079028 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.079114 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.079124 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.079145 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.079157 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.182349 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.182402 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.182414 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.182435 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.182454 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.285786 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.285862 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.285874 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.285918 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.285933 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.393902 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.393980 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.394000 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.394027 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.394049 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.497652 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.497692 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.497704 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.497719 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.497730 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.600914 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.601001 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.601014 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.601035 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.601051 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.704462 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.704536 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.704551 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.704574 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.704589 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.808227 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.808270 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.808280 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.808297 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.808310 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.912010 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.912097 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.912132 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.912164 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.912185 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:51Z","lastTransitionTime":"2026-01-29T13:17:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.985787 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.985857 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.985915 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:51 crc kubenswrapper[4787]: I0129 13:17:51.986005 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:51 crc kubenswrapper[4787]: E0129 13:17:51.985992 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:51 crc kubenswrapper[4787]: E0129 13:17:51.986171 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:51 crc kubenswrapper[4787]: E0129 13:17:51.986359 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:51 crc kubenswrapper[4787]: E0129 13:17:51.986662 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.013286 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 22:44:38.961293291 +0000 UTC
Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.015043 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.015092 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.015108 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.015134 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.015151 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.022781 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-fdf9c" podStartSLOduration=87.022753983 podStartE2EDuration="1m27.022753983s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.022186586 +0000 UTC m=+110.783446952" watchObservedRunningTime="2026-01-29 13:17:52.022753983 +0000 UTC m=+110.784014289" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.057319 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-j6wn4" podStartSLOduration=87.057296099 podStartE2EDuration="1m27.057296099s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.05697193 +0000 UTC m=+110.818232226" watchObservedRunningTime="2026-01-29 13:17:52.057296099 +0000 UTC m=+110.818556385" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.090622 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=28.090600199 podStartE2EDuration="28.090600199s" podCreationTimestamp="2026-01-29 13:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.089463176 +0000 UTC m=+110.850723492" watchObservedRunningTime="2026-01-29 13:17:52.090600199 +0000 UTC m=+110.851860505" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.117362 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.117715 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.117734 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.117757 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.117771 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.124951 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=88.12492951 podStartE2EDuration="1m28.12492951s" podCreationTimestamp="2026-01-29 13:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.107040718 +0000 UTC m=+110.868301034" watchObservedRunningTime="2026-01-29 13:17:52.12492951 +0000 UTC m=+110.886189826" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.179153 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-zdbwv" podStartSLOduration=89.179129269 podStartE2EDuration="1m29.179129269s" podCreationTimestamp="2026-01-29 13:16:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.17812795 +0000 UTC m=+110.939388226" watchObservedRunningTime="2026-01-29 13:17:52.179129269 +0000 UTC m=+110.940389545" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.191884 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podStartSLOduration=87.191841149 podStartE2EDuration="1m27.191841149s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.190909712 +0000 UTC m=+110.952170008" watchObservedRunningTime="2026-01-29 13:17:52.191841149 +0000 UTC m=+110.953101425" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.220197 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=57.220171395 podStartE2EDuration="57.220171395s" podCreationTimestamp="2026-01-29 13:16:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.205776995 +0000 UTC m=+110.967037281" watchObservedRunningTime="2026-01-29 13:17:52.220171395 +0000 UTC m=+110.981431671" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.220676 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.220723 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.220733 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.220749 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.220759 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.287929 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-z5mvv" podStartSLOduration=88.287905398 podStartE2EDuration="1m28.287905398s" podCreationTimestamp="2026-01-29 13:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.286813266 +0000 UTC m=+111.048073562" watchObservedRunningTime="2026-01-29 13:17:52.287905398 +0000 UTC m=+111.049165674" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.306788 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=89.306771248 podStartE2EDuration="1m29.306771248s" podCreationTimestamp="2026-01-29 13:16:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.306013516 +0000 UTC m=+111.067273802" watchObservedRunningTime="2026-01-29 13:17:52.306771248 +0000 UTC m=+111.068031524" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.320911 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=26.320885469 podStartE2EDuration="26.320885469s" podCreationTimestamp="2026-01-29 13:17:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.319767306 +0000 UTC m=+111.081027582" watchObservedRunningTime="2026-01-29 13:17:52.320885469 +0000 UTC m=+111.082145745" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.323145 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.323187 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.323200 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.323221 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.323236 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.352015 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-vqw5c" podStartSLOduration=87.351981865 podStartE2EDuration="1m27.351981865s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:52.35179655 +0000 UTC m=+111.113056846" watchObservedRunningTime="2026-01-29 13:17:52.351981865 +0000 UTC m=+111.113242141" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.427078 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.427178 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.427196 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.427253 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.427271 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.530192 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.530319 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.530386 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.530426 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.530533 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.634274 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.634375 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.634420 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.634512 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.634560 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.738084 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.738156 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.738171 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.738196 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.738212 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.841758 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.841856 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.841907 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.841936 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.841988 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.944772 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.945216 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.945317 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.945421 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:52 crc kubenswrapper[4787]: I0129 13:17:52.945536 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:52Z","lastTransitionTime":"2026-01-29T13:17:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.014287 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 05:28:34.829332456 +0000 UTC Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.050443 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.050525 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.050538 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.050559 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.050573 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.153742 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.154079 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.154163 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.154296 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.154424 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.258298 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.258380 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.258404 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.258446 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.258522 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.361396 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.361487 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.361500 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.361519 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.361533 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.465138 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.465202 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.465222 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.465246 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.465258 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.568704 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.569127 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.569266 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.569448 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.569576 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.672331 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.673355 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.673483 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.673655 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.673748 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.776583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.776645 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.776663 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.776688 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.776706 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.880002 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.880061 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.880081 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.880104 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.880120 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.982900 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.982962 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.982984 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.983008 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.983024 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:53Z","lastTransitionTime":"2026-01-29T13:17:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.985403 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.985429 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.985563 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:53 crc kubenswrapper[4787]: E0129 13:17:53.985652 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:53 crc kubenswrapper[4787]: I0129 13:17:53.985680 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:53 crc kubenswrapper[4787]: E0129 13:17:53.985767 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:53 crc kubenswrapper[4787]: E0129 13:17:53.985888 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:53 crc kubenswrapper[4787]: E0129 13:17:53.986013 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.014596 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 19:24:13.889030474 +0000 UTC Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.086268 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.086336 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.086352 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.086380 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.086401 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.189585 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.189659 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.189678 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.189706 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.189726 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.292491 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.292583 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.292606 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.292642 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.292668 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.396894 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.396951 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.396961 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.396981 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.396992 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.500842 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.500905 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.500918 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.500939 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.500951 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.603862 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.603915 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.603931 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.603958 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.603975 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.707549 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.707618 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.707644 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.707681 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.707707 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.811238 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.811293 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.811313 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.811340 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.811360 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.915228 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.915325 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.915344 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.915376 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:54 crc kubenswrapper[4787]: I0129 13:17:54.915404 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:54Z","lastTransitionTime":"2026-01-29T13:17:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.015811 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 20:47:03.58649509 +0000 UTC Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.018499 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.018550 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.018564 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.018585 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.018606 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.121717 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.121770 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.121784 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.121808 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.121826 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.225138 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.225212 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.225226 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.225247 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.225259 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.328591 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.328643 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.328658 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.328679 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.328723 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.432090 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.432173 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.432182 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.432202 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.432216 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.535132 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.535188 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.535202 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.535226 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.535245 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.638377 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.638444 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.638487 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.638511 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.638529 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.741666 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.741728 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.741745 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.741771 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.741790 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.844859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.844940 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.844959 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.844989 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.845010 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.948280 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.948347 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.948366 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.948399 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.948423 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:55Z","lastTransitionTime":"2026-01-29T13:17:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.985781 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.985844 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.985904 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:55 crc kubenswrapper[4787]: I0129 13:17:55.985972 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:55 crc kubenswrapper[4787]: E0129 13:17:55.986036 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:17:55 crc kubenswrapper[4787]: E0129 13:17:55.986146 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:17:55 crc kubenswrapper[4787]: E0129 13:17:55.986246 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:17:55 crc kubenswrapper[4787]: E0129 13:17:55.986415 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.016003 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 11:09:41.571270891 +0000 UTC
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.051966 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.052029 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.052044 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.052076 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.052089 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.156172 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.156307 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.156339 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.156378 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.156404 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.259617 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.259680 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.259697 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.259728 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.259751 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.362549 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.362616 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.362635 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.362664 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.362682 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.464977 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.465020 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.465032 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.465055 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.465072 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.567811 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.567850 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.567859 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.567877 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.567910 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.670913 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.670957 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.670969 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.670990 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.671003 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.773758 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.773793 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.773801 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.773818 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.773828 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.881788 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.881857 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.881940 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.882022 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.882107 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.986681 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.986718 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.986727 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.986742 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:56 crc kubenswrapper[4787]: I0129 13:17:56.986751 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:56Z","lastTransitionTime":"2026-01-29T13:17:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.016976 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 00:19:45.024020208 +0000 UTC
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.090041 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.090103 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.090115 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.090139 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.090152 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:57Z","lastTransitionTime":"2026-01-29T13:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.193622 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.193713 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.193733 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.193754 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.193788 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:57Z","lastTransitionTime":"2026-01-29T13:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.297659 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.297737 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.297756 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.297783 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.297802 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:57Z","lastTransitionTime":"2026-01-29T13:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.401305 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.401386 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.401420 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.401489 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.401517 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:57Z","lastTransitionTime":"2026-01-29T13:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.504994 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.505038 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.505047 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.505065 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.505076 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:57Z","lastTransitionTime":"2026-01-29T13:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.545710 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.545780 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.545796 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.545819 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.545836 4787 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-29T13:17:57Z","lastTransitionTime":"2026-01-29T13:17:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.598580 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"]
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.599134 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.601074 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.604325 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.605799 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.606015 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.725529 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/36a89889-7568-4dd0-a40e-eb9791c7e857-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.726169 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36a89889-7568-4dd0-a40e-eb9791c7e857-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.726225 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/36a89889-7568-4dd0-a40e-eb9791c7e857-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.726300 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36a89889-7568-4dd0-a40e-eb9791c7e857-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.726340 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/36a89889-7568-4dd0-a40e-eb9791c7e857-service-ca\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.827681 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/36a89889-7568-4dd0-a40e-eb9791c7e857-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.827726 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36a89889-7568-4dd0-a40e-eb9791c7e857-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.827767 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/36a89889-7568-4dd0-a40e-eb9791c7e857-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.827800 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36a89889-7568-4dd0-a40e-eb9791c7e857-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.827817 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/36a89889-7568-4dd0-a40e-eb9791c7e857-service-ca\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.827848 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/36a89889-7568-4dd0-a40e-eb9791c7e857-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.827924 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/36a89889-7568-4dd0-a40e-eb9791c7e857-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.828643 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/36a89889-7568-4dd0-a40e-eb9791c7e857-service-ca\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.845594 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/36a89889-7568-4dd0-a40e-eb9791c7e857-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.846119 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/36a89889-7568-4dd0-a40e-eb9791c7e857-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-znh79\" (UID: \"36a89889-7568-4dd0-a40e-eb9791c7e857\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.922802 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.986436 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.986663 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.986533 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:17:57 crc kubenswrapper[4787]: I0129 13:17:57.986511 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:17:57 crc kubenswrapper[4787]: E0129 13:17:57.986960 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:57 crc kubenswrapper[4787]: E0129 13:17:57.987302 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:17:57 crc kubenswrapper[4787]: E0129 13:17:57.987528 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:57 crc kubenswrapper[4787]: E0129 13:17:57.987613 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:58 crc kubenswrapper[4787]: I0129 13:17:58.017972 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 12:39:07.708429907 +0000 UTC Jan 29 13:17:58 crc kubenswrapper[4787]: I0129 13:17:58.018073 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 29 13:17:58 crc kubenswrapper[4787]: I0129 13:17:58.030068 4787 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 29 13:17:58 crc kubenswrapper[4787]: I0129 13:17:58.790990 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79" event={"ID":"36a89889-7568-4dd0-a40e-eb9791c7e857","Type":"ContainerStarted","Data":"dc8e4266baa4a0f24e5126ef3818d6d74d1db2717ead0c28b45be5cd8f698be8"} Jan 29 13:17:58 crc kubenswrapper[4787]: I0129 13:17:58.791091 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79" event={"ID":"36a89889-7568-4dd0-a40e-eb9791c7e857","Type":"ContainerStarted","Data":"a170d244955ba813bed952f1f2c73e1faa8146c4f84eea659e99607c07f27e0c"} Jan 29 13:17:59 crc kubenswrapper[4787]: I0129 13:17:59.985653 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:17:59 crc kubenswrapper[4787]: I0129 13:17:59.985688 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:17:59 crc kubenswrapper[4787]: I0129 13:17:59.985813 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:17:59 crc kubenswrapper[4787]: E0129 13:17:59.985821 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:17:59 crc kubenswrapper[4787]: I0129 13:17:59.985853 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:17:59 crc kubenswrapper[4787]: E0129 13:17:59.985970 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:17:59 crc kubenswrapper[4787]: E0129 13:17:59.986062 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:17:59 crc kubenswrapper[4787]: E0129 13:17:59.986126 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
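Note: the three certificate_manager.go entries above show the kubelet-serving certificate manager computing a rotation deadline, deciding to rotate, and then watching CertificateSigningRequests for the result. A minimal sketch of the KubeletConfiguration fields that enable this behavior; this is a generic upstream example, not a config read from this node:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
rotateCertificates: true   # rotate the kubelet client certificate via CSRs
serverTLSBootstrap: true   # request kubelet-serving certificates through the
                           # CSR API, which is what certificate_manager.go logs above
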
pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:18:00 crc kubenswrapper[4787]: I0129 13:18:00.802263 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/1.log" Jan 29 13:18:00 crc kubenswrapper[4787]: I0129 13:18:00.802948 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/0.log" Jan 29 13:18:00 crc kubenswrapper[4787]: I0129 13:18:00.803014 4787 generic.go:334] "Generic (PLEG): container finished" podID="d2526766-68ea-4959-a656-b0c68c754890" containerID="1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7" exitCode=1 Jan 29 13:18:00 crc kubenswrapper[4787]: I0129 13:18:00.803056 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j6wn4" event={"ID":"d2526766-68ea-4959-a656-b0c68c754890","Type":"ContainerDied","Data":"1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7"} Jan 29 13:18:00 crc kubenswrapper[4787]: I0129 13:18:00.803108 4787 scope.go:117] "RemoveContainer" containerID="c03fe0895296ca8580cd554eb80efd3b9ba696264779ebe6276ead7d676e0d28" Jan 29 13:18:00 crc kubenswrapper[4787]: I0129 13:18:00.803723 4787 scope.go:117] "RemoveContainer" containerID="1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7" Jan 29 13:18:00 crc kubenswrapper[4787]: E0129 13:18:00.804243 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-j6wn4_openshift-multus(d2526766-68ea-4959-a656-b0c68c754890)\"" pod="openshift-multus/multus-j6wn4" podUID="d2526766-68ea-4959-a656-b0c68c754890" Jan 29 13:18:00 crc kubenswrapper[4787]: I0129 13:18:00.823041 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-znh79" podStartSLOduration=95.823015192 podStartE2EDuration="1m35.823015192s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:17:58.818316656 +0000 UTC m=+117.579577022" watchObservedRunningTime="2026-01-29 13:18:00.823015192 +0000 UTC m=+119.584275478" Jan 29 13:18:01 crc kubenswrapper[4787]: I0129 13:18:01.808778 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/1.log" Jan 29 13:18:01 crc kubenswrapper[4787]: E0129 13:18:01.978835 4787 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 29 13:18:01 crc kubenswrapper[4787]: I0129 13:18:01.985634 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:01 crc kubenswrapper[4787]: I0129 13:18:01.985700 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:18:01 crc kubenswrapper[4787]: I0129 13:18:01.985644 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:18:01 crc kubenswrapper[4787]: I0129 13:18:01.985805 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:18:01 crc kubenswrapper[4787]: E0129 13:18:01.987374 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:18:01 crc kubenswrapper[4787]: E0129 13:18:01.987559 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:18:01 crc kubenswrapper[4787]: E0129 13:18:01.987642 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:18:01 crc kubenswrapper[4787]: E0129 13:18:01.987743 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:18:01 crc kubenswrapper[4787]: I0129 13:18:01.987760 4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf" Jan 29 13:18:01 crc kubenswrapper[4787]: E0129 13:18:01.987971 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-pq2mb_openshift-ovn-kubernetes(55309602-3b5c-4506-8cad-0c1609e2b1cb)\"" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" Jan 29 13:18:02 crc kubenswrapper[4787]: E0129 13:18:02.113278 4787 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 13:18:03 crc kubenswrapper[4787]: I0129 13:18:03.985644 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:18:03 crc kubenswrapper[4787]: I0129 13:18:03.985779 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:03 crc kubenswrapper[4787]: I0129 13:18:03.985952 4787 util.go:30] "No sandbox for pod can be found. 
Jan 29 13:18:03 crc kubenswrapper[4787]: E0129 13:18:03.986193 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:18:03 crc kubenswrapper[4787]: I0129 13:18:03.986259 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:18:03 crc kubenswrapper[4787]: E0129 13:18:03.986386 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:18:03 crc kubenswrapper[4787]: E0129 13:18:03.987035 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:18:03 crc kubenswrapper[4787]: E0129 13:18:03.987193 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:18:05 crc kubenswrapper[4787]: I0129 13:18:05.985842 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:18:05 crc kubenswrapper[4787]: I0129 13:18:05.985874 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:18:05 crc kubenswrapper[4787]: E0129 13:18:05.986555 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:18:05 crc kubenswrapper[4787]: I0129 13:18:05.985991 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:18:05 crc kubenswrapper[4787]: I0129 13:18:05.985951 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:18:05 crc kubenswrapper[4787]: E0129 13:18:05.986845 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:18:05 crc kubenswrapper[4787]: E0129 13:18:05.986921 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:18:05 crc kubenswrapper[4787]: E0129 13:18:05.986965 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:18:07 crc kubenswrapper[4787]: E0129 13:18:07.114883 4787 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 29 13:18:07 crc kubenswrapper[4787]: I0129 13:18:07.985824 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:18:07 crc kubenswrapper[4787]: I0129 13:18:07.985908 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:18:07 crc kubenswrapper[4787]: E0129 13:18:07.986001 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:18:07 crc kubenswrapper[4787]: E0129 13:18:07.986125 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:18:07 crc kubenswrapper[4787]: I0129 13:18:07.986236 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:18:07 crc kubenswrapper[4787]: I0129 13:18:07.986291 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:18:07 crc kubenswrapper[4787]: E0129 13:18:07.986370 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:18:07 crc kubenswrapper[4787]: E0129 13:18:07.986472 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:18:09 crc kubenswrapper[4787]: I0129 13:18:09.985351 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:18:09 crc kubenswrapper[4787]: I0129 13:18:09.985494 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:18:09 crc kubenswrapper[4787]: I0129 13:18:09.985385 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:18:09 crc kubenswrapper[4787]: I0129 13:18:09.985542 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:18:09 crc kubenswrapper[4787]: E0129 13:18:09.985599 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:18:09 crc kubenswrapper[4787]: E0129 13:18:09.985685 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
Jan 29 13:18:09 crc kubenswrapper[4787]: E0129 13:18:09.985803 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:18:09 crc kubenswrapper[4787]: E0129 13:18:09.985907 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:18:11 crc kubenswrapper[4787]: I0129 13:18:11.985259 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:18:11 crc kubenswrapper[4787]: I0129 13:18:11.985340 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 29 13:18:11 crc kubenswrapper[4787]: I0129 13:18:11.985342 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx"
Jan 29 13:18:11 crc kubenswrapper[4787]: E0129 13:18:11.986399 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 29 13:18:11 crc kubenswrapper[4787]: I0129 13:18:11.986467 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 29 13:18:11 crc kubenswrapper[4787]: E0129 13:18:11.986519 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 29 13:18:11 crc kubenswrapper[4787]: E0129 13:18:11.986662 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 29 13:18:11 crc kubenswrapper[4787]: E0129 13:18:11.986762 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0"
pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:18:12 crc kubenswrapper[4787]: E0129 13:18:12.115649 4787 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 29 13:18:12 crc kubenswrapper[4787]: I0129 13:18:12.986413 4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf" Jan 29 13:18:13 crc kubenswrapper[4787]: I0129 13:18:13.857519 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/3.log" Jan 29 13:18:13 crc kubenswrapper[4787]: I0129 13:18:13.861553 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerStarted","Data":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} Jan 29 13:18:13 crc kubenswrapper[4787]: I0129 13:18:13.862131 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:18:13 crc kubenswrapper[4787]: I0129 13:18:13.899611 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podStartSLOduration=108.899586738 podStartE2EDuration="1m48.899586738s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:13.898581645 +0000 UTC m=+132.659841951" watchObservedRunningTime="2026-01-29 13:18:13.899586738 +0000 UTC m=+132.660847014" Jan 29 13:18:13 crc kubenswrapper[4787]: I0129 13:18:13.985124 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:18:13 crc kubenswrapper[4787]: I0129 13:18:13.985127 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:18:13 crc kubenswrapper[4787]: I0129 13:18:13.985163 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:13 crc kubenswrapper[4787]: I0129 13:18:13.985738 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:18:13 crc kubenswrapper[4787]: E0129 13:18:13.986067 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:18:13 crc kubenswrapper[4787]: E0129 13:18:13.986257 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:18:13 crc kubenswrapper[4787]: E0129 13:18:13.986406 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:18:13 crc kubenswrapper[4787]: E0129 13:18:13.986506 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:18:14 crc kubenswrapper[4787]: I0129 13:18:14.034943 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-gkrsx"] Jan 29 13:18:14 crc kubenswrapper[4787]: I0129 13:18:14.866855 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:18:14 crc kubenswrapper[4787]: E0129 13:18:14.867527 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:18:14 crc kubenswrapper[4787]: I0129 13:18:14.986399 4787 scope.go:117] "RemoveContainer" containerID="1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7" Jan 29 13:18:15 crc kubenswrapper[4787]: I0129 13:18:15.872790 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/1.log" Jan 29 13:18:15 crc kubenswrapper[4787]: I0129 13:18:15.872926 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j6wn4" event={"ID":"d2526766-68ea-4959-a656-b0c68c754890","Type":"ContainerStarted","Data":"84b604b8776c920b884a601246c8598c99b2b3b2060c5a7f6c4aae009d172c7e"} Jan 29 13:18:15 crc kubenswrapper[4787]: I0129 13:18:15.985505 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:18:15 crc kubenswrapper[4787]: I0129 13:18:15.985578 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:18:15 crc kubenswrapper[4787]: I0129 13:18:15.985590 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:15 crc kubenswrapper[4787]: I0129 13:18:15.985623 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:18:15 crc kubenswrapper[4787]: E0129 13:18:15.985684 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 29 13:18:15 crc kubenswrapper[4787]: E0129 13:18:15.985816 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gkrsx" podUID="0fcadf59-74fc-4aeb-abd6-55f6061fa5b0" Jan 29 13:18:15 crc kubenswrapper[4787]: E0129 13:18:15.985966 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 29 13:18:15 crc kubenswrapper[4787]: E0129 13:18:15.986029 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.858858 4787 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.917785 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6hc9z"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.918908 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4whcq"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.919281 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.919983 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.921392 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.921598 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.922369 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.922696 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.923156 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.924123 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.926748 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qg2fk"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.927187 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.927755 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.927764 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.942075 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.944191 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.944395 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.946017 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.946168 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.946698 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.946741 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.946768 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.947055 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.947090 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.947269 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.947354 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.947369 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.947412 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.947367 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.947727 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.948337 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.950716 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.950884 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.951064 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.951257 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.952995 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.954749 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.955057 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.955214 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.955273 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.955685 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.955950 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.955691 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.955749 4787 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"audit" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.956561 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.956772 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.958745 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.959047 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.958808 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.958924 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.959764 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.959899 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.959966 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960368 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960413 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960557 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960587 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960595 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960657 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960684 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960714 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960759 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 29 13:18:17 crc 
kubenswrapper[4787]: I0129 13:18:17.960776 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960603 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960818 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960918 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960771 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.960969 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.961257 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.961291 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.961737 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.962686 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.962948 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-5v5vz"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972206 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a7214c59-d1ca-457b-adb6-12072f3793f1-node-pullsecrets\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972272 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knv76\" (UniqueName: \"kubernetes.io/projected/2431863b-8a4d-4897-a307-ed674bf53792-kube-api-access-knv76\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972305 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-etcd-client\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972335 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-lmmd6\" (UniqueName: \"kubernetes.io/projected/91f03958-0c07-4b90-bf24-697aa18e3ebd-kube-api-access-lmmd6\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972370 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ed21e2d-8627-418a-97d2-5576950e3494-config\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972397 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-encryption-config\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972426 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-config\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972451 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-audit\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972509 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972537 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-serving-cert\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972564 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972617 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/8ed21e2d-8627-418a-97d2-5576950e3494-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972650 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6mft\" (UniqueName: \"kubernetes.io/projected/8ed21e2d-8627-418a-97d2-5576950e3494-kube-api-access-h6mft\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972685 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2431863b-8a4d-4897-a307-ed674bf53792-serving-cert\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972714 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-etcd-serving-ca\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972766 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvd7z\" (UniqueName: \"kubernetes.io/projected/2da36a3e-2f90-4be6-b1e4-269d3324e410-kube-api-access-fvd7z\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972802 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-client-ca\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972830 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-config\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972859 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-config\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972888 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/2da36a3e-2f90-4be6-b1e4-269d3324e410-serving-cert\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972942 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2dvw\" (UniqueName: \"kubernetes.io/projected/a7214c59-d1ca-457b-adb6-12072f3793f1-kube-api-access-p2dvw\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.972982 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-image-import-ca\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.973023 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-service-ca-bundle\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.973052 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8ed21e2d-8627-418a-97d2-5576950e3494-images\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.973081 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-client-ca\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.973110 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-config\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.973151 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91f03958-0c07-4b90-bf24-697aa18e3ebd-serving-cert\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.973180 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a7214c59-d1ca-457b-adb6-12072f3793f1-audit-dir\") 
pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.973205 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.973632 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.977925 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.981415 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-n8d4v"] Jan 29 13:18:17 crc kubenswrapper[4787]: I0129 13:18:17.981946 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.009690 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.011538 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.012168 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.012405 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.014860 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.015045 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.016585 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.018093 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.018401 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.018984 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.019843 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.019927 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.019989 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.020378 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.024746 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.025364 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.025751 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.025754 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.025933 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.025997 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.027997 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wcqwh"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.028542 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.028584 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.029685 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.030184 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.030248 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.030319 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.030949 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.030966 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.031225 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.031543 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.031791 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.031915 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-k5sqz"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.032262 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.033136 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.033377 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.038266 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fq9qf"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.038761 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-tvvl4"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.039104 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.039321 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.042847 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.044784 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.045588 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-f5b6g"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.046372 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.046575 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.055484 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.062567 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.064479 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.064727 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.081950 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6mft\" (UniqueName: \"kubernetes.io/projected/8ed21e2d-8627-418a-97d2-5576950e3494-kube-api-access-h6mft\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.081975 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.084056 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.084238 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.081996 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2431863b-8a4d-4897-a307-ed674bf53792-serving-cert\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.084355 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: 
\"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.084391 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/8ed21e2d-8627-418a-97d2-5576950e3494-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.084418 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-etcd-serving-ca\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.085147 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.085322 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.085523 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.085766 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.085871 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.086055 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.086165 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.086292 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.086449 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.086939 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.087182 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.087202 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.087727 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.087864 4787 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console"/"trusted-ca-bundle" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.088931 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.089352 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.089565 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.089728 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.090201 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.090503 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.090670 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.090543 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091035 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091100 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091562 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-serving-cert\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091656 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-config\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091688 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091740 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvd7z\" (UniqueName: \"kubernetes.io/projected/2da36a3e-2f90-4be6-b1e4-269d3324e410-kube-api-access-fvd7z\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091825 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-config\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091859 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-client-ca\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091883 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm6rj\" (UniqueName: \"kubernetes.io/projected/24a38653-de36-438f-a9d7-fde6f094004f-kube-api-access-cm6rj\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091911 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-config\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:18 crc 
kubenswrapper[4787]: I0129 13:18:18.091929 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2da36a3e-2f90-4be6-b1e4-269d3324e410-serving-cert\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091949 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091985 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2dvw\" (UniqueName: \"kubernetes.io/projected/a7214c59-d1ca-457b-adb6-12072f3793f1-kube-api-access-p2dvw\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.091979 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-etcd-serving-ca\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092008 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092031 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-image-import-ca\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092053 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-trusted-ca-bundle\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092093 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/24a38653-de36-438f-a9d7-fde6f094004f-audit-dir\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092114 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092142 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-service-ca-bundle\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092161 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r6lw\" (UniqueName: \"kubernetes.io/projected/42992ca9-fe81-4299-bfa5-30e38dd9f127-kube-api-access-5r6lw\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092178 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092199 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8ed21e2d-8627-418a-97d2-5576950e3494-images\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092217 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-client-ca\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092234 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/42992ca9-fe81-4299-bfa5-30e38dd9f127-auth-proxy-config\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092255 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-config\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092276 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092300 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vscvd\" (UniqueName: \"kubernetes.io/projected/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-kube-api-access-vscvd\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: \"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092337 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7h4s\" (UniqueName: \"kubernetes.io/projected/24afb0d7-3f0e-479d-85dd-5aaf43928dec-kube-api-access-b7h4s\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092361 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91f03958-0c07-4b90-bf24-697aa18e3ebd-serving-cert\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092379 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24afb0d7-3f0e-479d-85dd-5aaf43928dec-config\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092400 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42992ca9-fe81-4299-bfa5-30e38dd9f127-config\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092419 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092440 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a7214c59-d1ca-457b-adb6-12072f3793f1-audit-dir\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092473 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092490 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhr62\" (UniqueName: \"kubernetes.io/projected/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-kube-api-access-lhr62\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092514 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a7214c59-d1ca-457b-adb6-12072f3793f1-node-pullsecrets\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092537 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knv76\" (UniqueName: \"kubernetes.io/projected/2431863b-8a4d-4897-a307-ed674bf53792-kube-api-access-knv76\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092558 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-etcd-client\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092580 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092602 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/42992ca9-fe81-4299-bfa5-30e38dd9f127-machine-approver-tls\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092629 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092652 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9khkv\" (UniqueName: \"kubernetes.io/projected/f4fd6365-d36d-4da8-8722-c4a542dae2eb-kube-api-access-9khkv\") pod \"downloads-7954f5f757-n8d4v\" (UID: \"f4fd6365-d36d-4da8-8722-c4a542dae2eb\") " pod="openshift-console/downloads-7954f5f757-n8d4v"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092668 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: \"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092688 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092705 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmmd6\" (UniqueName: \"kubernetes.io/projected/91f03958-0c07-4b90-bf24-697aa18e3ebd-kube-api-access-lmmd6\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092727 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092744 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-oauth-serving-cert\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092760 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0d35f531-65d6-4292-96f9-c5b3d4e31982-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092780 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ed21e2d-8627-418a-97d2-5576950e3494-config\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092797 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092817 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24afb0d7-3f0e-479d-85dd-5aaf43928dec-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092834 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phk4g\" (UniqueName: \"kubernetes.io/projected/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-kube-api-access-phk4g\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092850 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-encryption-config\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092870 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-service-ca\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092884 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-audit-policies\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092900 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092916 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-oauth-config\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092922 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-client-ca\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092931 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dtms\" (UniqueName: \"kubernetes.io/projected/0d35f531-65d6-4292-96f9-c5b3d4e31982-kube-api-access-8dtms\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092971 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.092996 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-config\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.093015 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-audit\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.093031 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.093051 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-serving-cert\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.093068 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.093084 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d35f531-65d6-4292-96f9-c5b3d4e31982-serving-cert\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.093713 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-config\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.095039 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-config\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.095806 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a7214c59-d1ca-457b-adb6-12072f3793f1-audit-dir\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.097600 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a7214c59-d1ca-457b-adb6-12072f3793f1-node-pullsecrets\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.098725 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-config\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.099599 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-image-import-ca\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.099632 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-etcd-client\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.098168 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ed21e2d-8627-418a-97d2-5576950e3494-config\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.100229 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/8ed21e2d-8627-418a-97d2-5576950e3494-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.100339 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2da36a3e-2f90-4be6-b1e4-269d3324e410-serving-cert\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.100889 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-trusted-ca-bundle\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.100982 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-config\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.101275 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-service-ca-bundle\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.101310 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a7214c59-d1ca-457b-adb6-12072f3793f1-audit\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.101494 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.102016 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2da36a3e-2f90-4be6-b1e4-269d3324e410-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.102034 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8ed21e2d-8627-418a-97d2-5576950e3494-images\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.102323 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-client-ca\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.103039 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.103549 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.103951 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2431863b-8a4d-4897-a307-ed674bf53792-serving-cert\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.104554 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-encryption-config\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.108716 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.109151 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.110282 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.110592 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.110731 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ld62n"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.111094 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.112184 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.112892 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.113537 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.113876 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.113948 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.114200 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.119492 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7214c59-d1ca-457b-adb6-12072f3793f1-serving-cert\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.119845 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.120239 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.120417 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.121288 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.121296 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.121797 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.121899 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zdpq4"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.121966 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.122440 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.122513 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.123130 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.123477 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.122588 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91f03958-0c07-4b90-bf24-697aa18e3ebd-serving-cert\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.125330 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.130341 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.130492 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.130886 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fxcnw"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.131250 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.131322 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.136825 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4whcq"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.146855 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.153010 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.164265 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6hc9z"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.161882 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.166633 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.166650 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.167688 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.168000 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jxhlz"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.169433 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.169578 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jxhlz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.170877 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.172530 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.174199 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.177037 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5v5vz"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.179088 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-n8d4v"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.180310 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wcqwh"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.181776 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.183572 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.184169 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.185589 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qg2fk"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.187177 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-f5b6g"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.188762 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.190690 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.192147 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.193876 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-cabundle\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.193919 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zvp2\" (UniqueName: \"kubernetes.io/projected/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-kube-api-access-4zvp2\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.193950 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-serving-cert\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.193976 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4fp7\" (UniqueName: \"kubernetes.io/projected/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-kube-api-access-m4fp7\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194020 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194048 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/62dbfcbd-5249-4d13-9166-e9762e83f252-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-47xjm\" (UID: \"62dbfcbd-5249-4d13-9166-e9762e83f252\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194099 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-trusted-ca-bundle\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194143 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/24a38653-de36-438f-a9d7-fde6f094004f-audit-dir\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194474 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194539 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-key\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194548 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/24a38653-de36-438f-a9d7-fde6f094004f-audit-dir\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194570 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194617 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-srv-cert\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194672 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/42992ca9-fe81-4299-bfa5-30e38dd9f127-auth-proxy-config\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194699 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r6lw\" (UniqueName: \"kubernetes.io/projected/42992ca9-fe81-4299-bfa5-30e38dd9f127-kube-api-access-5r6lw\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194726 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194765 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194809 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vscvd\" (UniqueName: \"kubernetes.io/projected/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-kube-api-access-vscvd\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: \"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194839 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7h4s\" (UniqueName: \"kubernetes.io/projected/24afb0d7-3f0e-479d-85dd-5aaf43928dec-kube-api-access-b7h4s\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194871 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-service-ca\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194895 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfqgl\" (UniqueName: \"kubernetes.io/projected/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-kube-api-access-cfqgl\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194941 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24afb0d7-3f0e-479d-85dd-5aaf43928dec-config\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194969 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42992ca9-fe81-4299-bfa5-30e38dd9f127-config\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.194996 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195039 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhr62\" (UniqueName: \"kubernetes.io/projected/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-kube-api-access-lhr62\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195072 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195101 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/42992ca9-fe81-4299-bfa5-30e38dd9f127-machine-approver-tls\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195128 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195156 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/298b71af-3617-4775-bb90-1b62201b557f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195191 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9khkv\" (UniqueName: \"kubernetes.io/projected/f4fd6365-d36d-4da8-8722-c4a542dae2eb-kube-api-access-9khkv\") pod \"downloads-7954f5f757-n8d4v\" (UID: \"f4fd6365-d36d-4da8-8722-c4a542dae2eb\") " pod="openshift-console/downloads-7954f5f757-n8d4v"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195220 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-metrics-tls\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195253 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195281 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: \"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195323 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195349 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/298b71af-3617-4775-bb90-1b62201b557f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195373 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f5a1327-b7cf-453c-88bb-7e890ad5340e-config\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195420 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-oauth-serving-cert\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195448 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0d35f531-65d6-4292-96f9-c5b3d4e31982-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195545 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kwh5\" (UniqueName: \"kubernetes.io/projected/5667f919-7345-4afa-b7bb-a89f26885147-kube-api-access-8kwh5\") pod \"multus-admission-controller-857f4d67dd-ld62n\" (UID: \"5667f919-7345-4afa-b7bb-a89f26885147\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195576 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-encryption-config\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195583 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-trusted-ca-bundle\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195605 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24afb0d7-3f0e-479d-85dd-5aaf43928dec-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195633 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phk4g\" (UniqueName: \"kubernetes.io/projected/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-kube-api-access-phk4g\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.195635 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196037 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196076 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/298b71af-3617-4775-bb90-1b62201b557f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196099 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f5a1327-b7cf-453c-88bb-7e890ad5340e-serving-cert\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196122 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-service-ca\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196141 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-audit-policies\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196162 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196183 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/2f88bb10-54c4-41f6-9345-74d441059753-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jdzgw\" (UID: \"2f88bb10-54c4-41f6-9345-74d441059753\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196206 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-client\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196233 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-oauth-config\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196254 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dtms\" (UniqueName: \"kubernetes.io/projected/0d35f531-65d6-4292-96f9-c5b3d4e31982-kube-api-access-8dtms\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196277 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196301 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d559b4e-9262-4ada-8539-62bd08f3dfe4-proxy-tls\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196326 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-serving-cert\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196353 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/65ea8869-3303-4888-bae4-3be58d5097b9-audit-dir\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.196376 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-ca\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.197758 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/0d35f531-65d6-4292-96f9-c5b3d4e31982-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.198037 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-k5sqz"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.198136 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/42992ca9-fe81-4299-bfa5-30e38dd9f127-auth-proxy-config\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.198476 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.198524 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.199489 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-oauth-serving-cert\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.199608 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst"]
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.199919 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qg2fk\"
(UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.200168 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.200226 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.200311 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d35f531-65d6-4292-96f9-c5b3d4e31982-serving-cert\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.201153 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.201519 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8fvs\" (UniqueName: \"kubernetes.io/projected/1f143c5b-cead-45c8-8c25-3b259d28a6b4-kube-api-access-k8fvs\") pod \"migrator-59844c95c7-5bfrw\" (UID: \"1f143c5b-cead-45c8-8c25-3b259d28a6b4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.201901 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: \"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.201932 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: \"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.201943 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1d559b4e-9262-4ada-8539-62bd08f3dfe4-images\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: 
\"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.201918 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.201985 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42992ca9-fe81-4299-bfa5-30e38dd9f127-config\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202072 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-serving-cert\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202381 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7f5a1327-b7cf-453c-88bb-7e890ad5340e-trusted-ca\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202443 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202744 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-service-ca\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202761 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-audit-policies\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202900 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ddq9\" (UniqueName: \"kubernetes.io/projected/2f88bb10-54c4-41f6-9345-74d441059753-kube-api-access-2ddq9\") pod \"control-plane-machine-set-operator-78cbb6b69f-jdzgw\" (UID: \"2f88bb10-54c4-41f6-9345-74d441059753\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202920 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-login\") pod 
\"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202961 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5667f919-7345-4afa-b7bb-a89f26885147-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ld62n\" (UID: \"5667f919-7345-4afa-b7bb-a89f26885147\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.202983 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdv57\" (UniqueName: \"kubernetes.io/projected/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-kube-api-access-mdv57\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203028 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-serving-cert\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203074 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-audit-policies\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203144 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-config\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203174 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203194 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d559b4e-9262-4ada-8539-62bd08f3dfe4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203231 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv9gr\" (UniqueName: \"kubernetes.io/projected/1d559b4e-9262-4ada-8539-62bd08f3dfe4-kube-api-access-lv9gr\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203253 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgb6g\" (UniqueName: \"kubernetes.io/projected/9604be66-c8c5-4ed4-97b9-15648be60d67-kube-api-access-kgb6g\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203271 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-trusted-ca\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203309 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2l68\" (UniqueName: \"kubernetes.io/projected/62dbfcbd-5249-4d13-9166-e9762e83f252-kube-api-access-s2l68\") pod \"package-server-manager-789f6589d5-47xjm\" (UID: \"62dbfcbd-5249-4d13-9166-e9762e83f252\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203329 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203421 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203519 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-etcd-client\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203559 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203630 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm6rj\" (UniqueName: \"kubernetes.io/projected/24a38653-de36-438f-a9d7-fde6f094004f-kube-api-access-cm6rj\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 
13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203658 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhmst\" (UniqueName: \"kubernetes.io/projected/7f5a1327-b7cf-453c-88bb-7e890ad5340e-kube-api-access-zhmst\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203693 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: \"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203763 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24afb0d7-3f0e-479d-85dd-5aaf43928dec-config\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203815 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203926 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.203969 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-config\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.204027 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-config\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.204066 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkgsz\" (UniqueName: \"kubernetes.io/projected/65ea8869-3303-4888-bae4-3be58d5097b9-kube-api-access-kkgsz\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.204105 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-config\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " 
pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.204511 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.205127 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.205579 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/42992ca9-fe81-4299-bfa5-30e38dd9f127-machine-approver-tls\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.205817 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.206234 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-serving-cert\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.206437 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-oauth-config\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.206495 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.207752 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-hwc5r"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.208588 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0d35f531-65d6-4292-96f9-c5b3d4e31982-serving-cert\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.208695 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.209009 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-pgssf"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.209506 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24afb0d7-3f0e-479d-85dd-5aaf43928dec-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.209146 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-hwc5r" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.209752 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.210436 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fq9qf"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.211031 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.212536 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.214344 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.215380 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.216434 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ld62n"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.217609 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.218637 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zdpq4"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.219790 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.220837 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-hwc5r"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.221230 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.223039 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fxcnw"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.223144 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.224536 4787 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.224747 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.226668 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jxhlz"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.227052 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.227303 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-pgssf"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.228483 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-wx8br"] Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.229239 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.244420 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.265996 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.284544 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305191 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8fvs\" (UniqueName: \"kubernetes.io/projected/1f143c5b-cead-45c8-8c25-3b259d28a6b4-kube-api-access-k8fvs\") pod \"migrator-59844c95c7-5bfrw\" (UID: \"1f143c5b-cead-45c8-8c25-3b259d28a6b4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305247 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1d559b4e-9262-4ada-8539-62bd08f3dfe4-images\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305275 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-serving-cert\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305298 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7f5a1327-b7cf-453c-88bb-7e890ad5340e-trusted-ca\") pod 
\"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305321 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ddq9\" (UniqueName: \"kubernetes.io/projected/2f88bb10-54c4-41f6-9345-74d441059753-kube-api-access-2ddq9\") pod \"control-plane-machine-set-operator-78cbb6b69f-jdzgw\" (UID: \"2f88bb10-54c4-41f6-9345-74d441059753\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305347 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5667f919-7345-4afa-b7bb-a89f26885147-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ld62n\" (UID: \"5667f919-7345-4afa-b7bb-a89f26885147\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305369 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdv57\" (UniqueName: \"kubernetes.io/projected/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-kube-api-access-mdv57\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305395 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-audit-policies\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305419 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d559b4e-9262-4ada-8539-62bd08f3dfe4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305445 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv9gr\" (UniqueName: \"kubernetes.io/projected/1d559b4e-9262-4ada-8539-62bd08f3dfe4-kube-api-access-lv9gr\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305512 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2l68\" (UniqueName: \"kubernetes.io/projected/62dbfcbd-5249-4d13-9166-e9762e83f252-kube-api-access-s2l68\") pod \"package-server-manager-789f6589d5-47xjm\" (UID: \"62dbfcbd-5249-4d13-9166-e9762e83f252\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305542 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305571 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgb6g\" (UniqueName: \"kubernetes.io/projected/9604be66-c8c5-4ed4-97b9-15648be60d67-kube-api-access-kgb6g\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305596 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-trusted-ca\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305630 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305708 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-etcd-client\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305747 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305777 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhmst\" (UniqueName: \"kubernetes.io/projected/7f5a1327-b7cf-453c-88bb-7e890ad5340e-kube-api-access-zhmst\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305815 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-config\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305840 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-config\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305865 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkgsz\" (UniqueName: 
\"kubernetes.io/projected/65ea8869-3303-4888-bae4-3be58d5097b9-kube-api-access-kkgsz\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305895 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-serving-cert\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305920 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4fp7\" (UniqueName: \"kubernetes.io/projected/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-kube-api-access-m4fp7\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305945 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-cabundle\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305974 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zvp2\" (UniqueName: \"kubernetes.io/projected/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-kube-api-access-4zvp2\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.305999 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/62dbfcbd-5249-4d13-9166-e9762e83f252-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-47xjm\" (UID: \"62dbfcbd-5249-4d13-9166-e9762e83f252\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306065 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-key\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306091 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306116 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-srv-cert\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306186 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-service-ca\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306215 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfqgl\" (UniqueName: \"kubernetes.io/projected/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-kube-api-access-cfqgl\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306263 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/298b71af-3617-4775-bb90-1b62201b557f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306297 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-metrics-tls\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306333 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/298b71af-3617-4775-bb90-1b62201b557f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306362 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f5a1327-b7cf-453c-88bb-7e890ad5340e-config\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306398 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kwh5\" (UniqueName: \"kubernetes.io/projected/5667f919-7345-4afa-b7bb-a89f26885147-kube-api-access-8kwh5\") pod \"multus-admission-controller-857f4d67dd-ld62n\" (UID: \"5667f919-7345-4afa-b7bb-a89f26885147\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306427 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-encryption-config\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306509 4787 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/298b71af-3617-4775-bb90-1b62201b557f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306538 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f5a1327-b7cf-453c-88bb-7e890ad5340e-serving-cert\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306565 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/2f88bb10-54c4-41f6-9345-74d441059753-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jdzgw\" (UID: \"2f88bb10-54c4-41f6-9345-74d441059753\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306592 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-client\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306628 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d559b4e-9262-4ada-8539-62bd08f3dfe4-proxy-tls\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306655 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-serving-cert\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306678 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/65ea8869-3303-4888-bae4-3be58d5097b9-audit-dir\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.306698 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-ca\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.307600 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.308077 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f5a1327-b7cf-453c-88bb-7e890ad5340e-config\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.308149 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/65ea8869-3303-4888-bae4-3be58d5097b9-audit-dir\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.308541 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.308979 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1d559b4e-9262-4ada-8539-62bd08f3dfe4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.309342 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7f5a1327-b7cf-453c-88bb-7e890ad5340e-trusted-ca\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.309448 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.309720 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/65ea8869-3303-4888-bae4-3be58d5097b9-audit-policies\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.310329 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-encryption-config\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.310729 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-config\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.310826 4787 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f5a1327-b7cf-453c-88bb-7e890ad5340e-serving-cert\") pod \"console-operator-58897d9998-wcqwh\" (UID: \"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.311273 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-serving-cert\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.312157 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-client\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.312242 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-serving-cert\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.312347 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/65ea8869-3303-4888-bae4-3be58d5097b9-etcd-client\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.324737 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.328786 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-ca\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.345241 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.348421 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-etcd-service-ca\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.364477 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.385118 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.405942 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.438902 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.445889 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.463862 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.484202 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.503687 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.524372 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.543724 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.564486 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.584825 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.605416 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.624953 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.685082 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.690425 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6mft\" (UniqueName: \"kubernetes.io/projected/8ed21e2d-8627-418a-97d2-5576950e3494-kube-api-access-h6mft\") pod \"machine-api-operator-5694c8668f-vlzsj\" (UID: \"8ed21e2d-8627-418a-97d2-5576950e3494\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.704401 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.723911 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.745480 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
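The reflector.go:368 "Caches populated" lines come from client-go reflectors the kubelet runs for each Secret and ConfigMap its pods reference; a volume mount that needs one of these objects blocks until the corresponding cache has synced. A hedged sketch of the generic informer pattern behind those messages, not the kubelet's exact wiring (the kubeconfig path is a placeholder):

    package main

    import (
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)
        factory := informers.NewSharedInformerFactory(cs, 10*time.Minute)
        secrets := factory.Core().V1().Secrets().Informer()
        stop := make(chan struct{})
        defer close(stop)
        factory.Start(stop)
        // Mounts that depend on a Secret wait here; that is why SetUp lines in the
        // log succeed immediately after the matching "Caches populated" line.
        cache.WaitForCacheSync(stop, secrets.HasSynced)
    }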
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.764702 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.785116 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.811524 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.820418 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-trusted-ca\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.825293 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.842289 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.844496 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.880598 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvd7z\" (UniqueName: \"kubernetes.io/projected/2da36a3e-2f90-4be6-b1e4-269d3324e410-kube-api-access-fvd7z\") pod \"authentication-operator-69f744f599-qbzqd\" (UID: \"2da36a3e-2f90-4be6-b1e4-269d3324e410\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.900833 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2dvw\" (UniqueName: \"kubernetes.io/projected/a7214c59-d1ca-457b-adb6-12072f3793f1-kube-api-access-p2dvw\") pod \"apiserver-76f77b778f-6hc9z\" (UID: \"a7214c59-d1ca-457b-adb6-12072f3793f1\") " pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.926032 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmmd6\" (UniqueName: \"kubernetes.io/projected/91f03958-0c07-4b90-bf24-697aa18e3ebd-kube-api-access-lmmd6\") pod \"route-controller-manager-6576b87f9c-t64vw\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.946674 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.957949 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knv76\" (UniqueName: \"kubernetes.io/projected/2431863b-8a4d-4897-a307-ed674bf53792-kube-api-access-knv76\") pod \"controller-manager-879f6c89f-4whcq\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:18 crc 
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.965014 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.967133 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.985437 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 29 13:18:18 crc kubenswrapper[4787]: I0129 13:18:18.991320 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1d559b4e-9262-4ada-8539-62bd08f3dfe4-images\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.005057 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.025704 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.028585 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1d559b4e-9262-4ada-8539-62bd08f3dfe4-proxy-tls\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.031303 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/2f88bb10-54c4-41f6-9345-74d441059753-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-jdzgw\" (UID: \"2f88bb10-54c4-41f6-9345-74d441059753\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.044778 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.066302 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.085308 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.105756 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.120202 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vlzsj"]
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.123616 4787 request.go:700] Waited for 1.009322293s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/secrets?fieldSelector=metadata.name%3Dpackage-server-manager-serving-cert&limit=500&resourceVersion=0
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.125782 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.132799 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/62dbfcbd-5249-4d13-9166-e9762e83f252-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-47xjm\" (UID: \"62dbfcbd-5249-4d13-9166-e9762e83f252\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.145653 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.155867 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5667f919-7345-4afa-b7bb-a89f26885147-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ld62n\" (UID: \"5667f919-7345-4afa-b7bb-a89f26885147\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.158764 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.171904 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.176898 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qbzqd"]
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.183793 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.184868 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.205849 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.211276 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/298b71af-3617-4775-bb90-1b62201b557f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.225538 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
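The request.go:700 wait above is client-go's own token-bucket limiter holding back the kubelet's API calls; as the message itself notes, it is unrelated to server-side API Priority and Fairness. A sketch of how that limiter is configured on a rest.Config, with illustrative numbers (the client-go defaults are QPS 5 and Burst 10):

    package client

    import (
        "k8s.io/client-go/rest"
        "k8s.io/client-go/util/flowcontrol"
    )

    // configure raises the client-side rate limits so bursts of requests like
    // the GET above are not queued locally.
    func configure(cfg *rest.Config) {
        cfg.QPS = 50
        cfg.Burst = 100
        // Equivalent explicit form:
        cfg.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(50, 100)
    }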
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.228813 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.245478 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.249381 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/298b71af-3617-4775-bb90-1b62201b557f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.267115 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.285038 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.305030 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.308196 4787 secret.go:188] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.308225 4787 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.308252 4787 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.308269 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-key podName:9604be66-c8c5-4ed4-97b9-15648be60d67 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:19.808247963 +0000 UTC m=+138.569508239 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-key") pod "service-ca-9c57cc56f-zdpq4" (UID: "9604be66-c8c5-4ed4-97b9-15648be60d67") : failed to sync secret cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.308368 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-srv-cert podName:7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea nodeName:}" failed. No retries permitted until 2026-01-29 13:18:19.808339846 +0000 UTC m=+138.569600122 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-srv-cert") pod "olm-operator-6b444d44fb-5wvf8" (UID: "7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea") : failed to sync secret cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.308382 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-cabundle podName:9604be66-c8c5-4ed4-97b9-15648be60d67 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:19.808374357 +0000 UTC m=+138.569634633 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-cabundle") pod "service-ca-9c57cc56f-zdpq4" (UID: "9604be66-c8c5-4ed4-97b9-15648be60d67") : failed to sync configmap cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.309386 4787 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.309468 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-config podName:b069d68a-0c0d-472e-92e9-c2b2b84f11b8 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:19.809428341 +0000 UTC m=+138.570688797 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-config") pod "service-ca-operator-777779d784-s9ssd" (UID: "b069d68a-0c0d-472e-92e9-c2b2b84f11b8") : failed to sync configmap cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.310542 4787 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.310579 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-serving-cert podName:b069d68a-0c0d-472e-92e9-c2b2b84f11b8 nodeName:}" failed. No retries permitted until 2026-01-29 13:18:19.810571399 +0000 UTC m=+138.571831675 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-serving-cert") pod "service-ca-operator-777779d784-s9ssd" (UID: "b069d68a-0c0d-472e-92e9-c2b2b84f11b8") : failed to sync secret cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.310614 4787 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: E0129 13:18:19.310641 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-profile-collector-cert podName:7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea nodeName:}" failed. No retries permitted until 2026-01-29 13:18:19.810633961 +0000 UTC m=+138.571894237 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-profile-collector-cert") pod "olm-operator-6b444d44fb-5wvf8" (UID: "7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea") : failed to sync secret cache: timed out waiting for the condition
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.324862 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.344517 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.350396 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6hc9z"]
Jan 29 13:18:19 crc kubenswrapper[4787]: W0129 13:18:19.359951 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda7214c59_d1ca_457b_adb6_12072f3793f1.slice/crio-fe16e31e13febd6eacf580c7405204ae4303a8fa5a31e76328929af776965ac5 WatchSource:0}: Error finding container fe16e31e13febd6eacf580c7405204ae4303a8fa5a31e76328929af776965ac5: Status 404 returned error can't find the container with id fe16e31e13febd6eacf580c7405204ae4303a8fa5a31e76328929af776965ac5
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.365797 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.380935 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"]
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.387041 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.405934 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.426102 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.436503 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4whcq"]
Jan 29 13:18:19 crc kubenswrapper[4787]: W0129 13:18:19.441778 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2431863b_8a4d_4897_a307_ed674bf53792.slice/crio-b33ba00c21070b65802192d07bfebeafd6732a7047d776bd319e75a8610cffc2 WatchSource:0}: Error finding container b33ba00c21070b65802192d07bfebeafd6732a7047d776bd319e75a8610cffc2: Status 404 returned error can't find the container with id b33ba00c21070b65802192d07bfebeafd6732a7047d776bd319e75a8610cffc2
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.444983 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.464521 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
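Each nestedpendingoperations.go:348 line above schedules the failed mount's next attempt, starting at durationBeforeRetry 500ms. Kubelet volume operations back off exponentially from there; the doubling factor and the roughly two-minute cap below match the upstream exponentialbackoff package, but treat the exact constants as an assumption:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 500 * time.Millisecond           // durationBeforeRetry in the log
        maxDelay := 2*time.Minute + 2*time.Second // assumed cap on the backoff
        for attempt := 1; attempt <= 10; attempt++ {
            fmt.Printf("attempt %d retried after %v\n", attempt, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }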
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.484135 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.504470 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.524158 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.544835 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.564187 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.584235 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.604182 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.623580 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.644930 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.663927 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.704870 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.726246 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.746146 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.764506 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.785855 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.817273 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.824115 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.842999 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-key\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.843085 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-srv-cert\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.843352 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.843600 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-config\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.843640 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-cabundle\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.843673 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-serving-cert\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.844726 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-config\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.845626 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.846592 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-cabundle\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.849951 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-serving-cert\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.849996 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-srv-cert\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.850134 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-profile-collector-cert\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.850287 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/9604be66-c8c5-4ed4-97b9-15648be60d67-signing-key\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.865089 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.886112 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.892418 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" event={"ID":"2431863b-8a4d-4897-a307-ed674bf53792","Type":"ContainerStarted","Data":"c391d1152110658bb71ef2bf4394e692ea92e8e1f01bb4e5c4de7ce883b3e7aa"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.892570 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" event={"ID":"2431863b-8a4d-4897-a307-ed674bf53792","Type":"ContainerStarted","Data":"b33ba00c21070b65802192d07bfebeafd6732a7047d776bd319e75a8610cffc2"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.892619 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.894434 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" event={"ID":"2da36a3e-2f90-4be6-b1e4-269d3324e410","Type":"ContainerStarted","Data":"4d3054f533ad027775454cdfbfad2f280cb31326193dd26662226656b0faee1e"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.894560 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" event={"ID":"2da36a3e-2f90-4be6-b1e4-269d3324e410","Type":"ContainerStarted","Data":"c0a96d973139081a61ca5a0118f7fa2cd183e45b811bc567c14f6fd816bafcd5"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.895849 4787 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-4whcq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.895935 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.899195 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" event={"ID":"8ed21e2d-8627-418a-97d2-5576950e3494","Type":"ContainerStarted","Data":"947072f34fcc13559526c492bf94d48016f490027eb498a71f6da5c753f41afe"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.899279 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" event={"ID":"8ed21e2d-8627-418a-97d2-5576950e3494","Type":"ContainerStarted","Data":"53f0b28672bcf2e749e507b4a7960a1c031755740c89679e2ec57fdcc59e624e"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.899294 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" event={"ID":"8ed21e2d-8627-418a-97d2-5576950e3494","Type":"ContainerStarted","Data":"b9236c04ba0ef917b047884cd49c540cf73d3c08942bf06ea83d9b129d543957"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.904874 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.905268 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" event={"ID":"91f03958-0c07-4b90-bf24-697aa18e3ebd","Type":"ContainerStarted","Data":"f8ab09b7ebf64e29a28dca80eab27e110baae6645e187333f0882ba271d307fa"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.905311 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" event={"ID":"91f03958-0c07-4b90-bf24-697aa18e3ebd","Type":"ContainerStarted","Data":"5c91fb7e11eeb81f3a494a78cd43aed8f3a7b730601aa1b47cf80a9ec0272185"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.905593 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.907531 4787 generic.go:334] "Generic (PLEG): container finished" podID="a7214c59-d1ca-457b-adb6-12072f3793f1" containerID="286d619a779ea85e86b434057d6b4bfc6f556e24622b9fa510691658713bd6e9" exitCode=0
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.907582 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" event={"ID":"a7214c59-d1ca-457b-adb6-12072f3793f1","Type":"ContainerDied","Data":"286d619a779ea85e86b434057d6b4bfc6f556e24622b9fa510691658713bd6e9"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.907608 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" event={"ID":"a7214c59-d1ca-457b-adb6-12072f3793f1","Type":"ContainerStarted","Data":"fe16e31e13febd6eacf580c7405204ae4303a8fa5a31e76328929af776965ac5"}
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.910112 4787 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-t64vw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.910154 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.924651 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.945751 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.965757 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Jan 29 13:18:19 crc kubenswrapper[4787]: I0129 13:18:19.984607 4787 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.005951 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.047336 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vscvd\" (UniqueName: \"kubernetes.io/projected/25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598-kube-api-access-vscvd\") pod \"openshift-controller-manager-operator-756b6f6bc6-w2r7l\" (UID: \"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l"
Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.061818 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhr62\" (UniqueName: \"kubernetes.io/projected/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-kube-api-access-lhr62\") pod \"console-f9d7485db-5v5vz\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.081743 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7h4s\" (UniqueName: \"kubernetes.io/projected/24afb0d7-3f0e-479d-85dd-5aaf43928dec-kube-api-access-b7h4s\") pod \"openshift-apiserver-operator-796bbdcf4f-kc4v7\" (UID: \"24afb0d7-3f0e-479d-85dd-5aaf43928dec\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"
Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.100351 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r6lw\" (UniqueName: \"kubernetes.io/projected/42992ca9-fe81-4299-bfa5-30e38dd9f127-kube-api-access-5r6lw\") pod \"machine-approver-56656f9798-sxmvk\" (UID: \"42992ca9-fe81-4299-bfa5-30e38dd9f127\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk"
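The patch_prober and prober.go lines are kubelet readiness probes: an HTTPS GET against the pod IP, performed without certificate verification. A failure with "connect: connection refused" immediately after ContainerStarted usually just means the process has not bound :8443 yet, and the probe is retried on its configured period. An equivalent standalone check, with the pod IP copied from the controller-manager record above:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
    )

    func main() {
        client := &http.Client{Transport: &http.Transport{
            // Kubelet HTTPS probes skip certificate verification as well.
            TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
        }}
        resp, err := client.Get("https://10.217.0.8:8443/healthz")
        if err != nil {
            fmt.Println("probe failed:", err) // e.g. connect: connection refused
            return
        }
        defer resp.Body.Close()
        fmt.Println("probe status:", resp.Status)
    }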
\"kubernetes.io/projected/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.140709 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phk4g\" (UniqueName: \"kubernetes.io/projected/1f44e738-ab48-48a0-a9d5-c3ed59b0bd10-kube-api-access-phk4g\") pod \"cluster-image-registry-operator-dc59b4c8b-fl5rb\" (UID: \"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.142854 4787 request.go:700] Waited for 1.942734951s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/default/token Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.162159 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9khkv\" (UniqueName: \"kubernetes.io/projected/f4fd6365-d36d-4da8-8722-c4a542dae2eb-kube-api-access-9khkv\") pod \"downloads-7954f5f757-n8d4v\" (UID: \"f4fd6365-d36d-4da8-8722-c4a542dae2eb\") " pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.187594 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dtms\" (UniqueName: \"kubernetes.io/projected/0d35f531-65d6-4292-96f9-c5b3d4e31982-kube-api-access-8dtms\") pod \"openshift-config-operator-7777fb866f-cncjd\" (UID: \"0d35f531-65d6-4292-96f9-c5b3d4e31982\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.200364 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm6rj\" (UniqueName: \"kubernetes.io/projected/24a38653-de36-438f-a9d7-fde6f094004f-kube-api-access-cm6rj\") pod \"oauth-openshift-558db77b4-qg2fk\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.204834 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.224589 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.225525 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.231923 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.242602 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.246141 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.252585 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.265042 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.265210 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.283117 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.284625 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.305100 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.325138 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.345157 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.353929 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.367585 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.375750 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.384966 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.429205 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8fvs\" (UniqueName: \"kubernetes.io/projected/1f143c5b-cead-45c8-8c25-3b259d28a6b4-kube-api-access-k8fvs\") pod \"migrator-59844c95c7-5bfrw\" (UID: \"1f143c5b-cead-45c8-8c25-3b259d28a6b4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.449804 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.464225 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zvp2\" (UniqueName: \"kubernetes.io/projected/b366d44d-d212-4aae-8d1f-ecd5cc7083e1-kube-api-access-4zvp2\") pod \"ingress-operator-5b745b69d9-ccc5w\" (UID: \"b366d44d-d212-4aae-8d1f-ecd5cc7083e1\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.481206 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qg2fk"] Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.484108 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfqgl\" (UniqueName: \"kubernetes.io/projected/7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea-kube-api-access-cfqgl\") pod \"olm-operator-6b444d44fb-5wvf8\" (UID: \"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.507829 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kwh5\" (UniqueName: \"kubernetes.io/projected/5667f919-7345-4afa-b7bb-a89f26885147-kube-api-access-8kwh5\") pod \"multus-admission-controller-857f4d67dd-ld62n\" (UID: \"5667f919-7345-4afa-b7bb-a89f26885147\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.521124 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/298b71af-3617-4775-bb90-1b62201b557f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-p4pq9\" (UID: \"298b71af-3617-4775-bb90-1b62201b557f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.541775 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4fp7\" (UniqueName: \"kubernetes.io/projected/b069d68a-0c0d-472e-92e9-c2b2b84f11b8-kube-api-access-m4fp7\") pod \"service-ca-operator-777779d784-s9ssd\" (UID: \"b069d68a-0c0d-472e-92e9-c2b2b84f11b8\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.545380 4787 
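The kube-api-access-* and bound-sa-token volumes above are projected service-account tokens, which the kubelet obtains through the TokenRequest API; the throttled POST .../serviceaccounts/default/token earlier in the log is one of those calls. A hedged sketch of the same request made directly with client-go, with the namespace and audience chosen only for illustration:

    package main

    import (
        "context"
        "fmt"

        authnv1 "k8s.io/api/authentication/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)
        // Ask for a short-lived, audience-bound token for the "default"
        // service account, as the kubelet does for projected token volumes.
        tr, err := cs.CoreV1().ServiceAccounts("openshift-console").CreateToken(
            context.Background(), "default",
            &authnv1.TokenRequest{Spec: authnv1.TokenRequestSpec{
                Audiences: []string{"https://kubernetes.default.svc"},
            }}, metav1.CreateOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println("token expires:", tr.Status.ExpirationTimestamp)
    }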
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.563124 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.570306 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgb6g\" (UniqueName: \"kubernetes.io/projected/9604be66-c8c5-4ed4-97b9-15648be60d67-kube-api-access-kgb6g\") pod \"service-ca-9c57cc56f-zdpq4\" (UID: \"9604be66-c8c5-4ed4-97b9-15648be60d67\") " pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.588207 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv9gr\" (UniqueName: \"kubernetes.io/projected/1d559b4e-9262-4ada-8539-62bd08f3dfe4-kube-api-access-lv9gr\") pod \"machine-config-operator-74547568cd-qtfv6\" (UID: \"1d559b4e-9262-4ada-8539-62bd08f3dfe4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.605261 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2l68\" (UniqueName: \"kubernetes.io/projected/62dbfcbd-5249-4d13-9166-e9762e83f252-kube-api-access-s2l68\") pod \"package-server-manager-789f6589d5-47xjm\" (UID: \"62dbfcbd-5249-4d13-9166-e9762e83f252\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.616498 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"] Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.621555 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ddq9\" (UniqueName: \"kubernetes.io/projected/2f88bb10-54c4-41f6-9345-74d441059753-kube-api-access-2ddq9\") pod \"control-plane-machine-set-operator-78cbb6b69f-jdzgw\" (UID: \"2f88bb10-54c4-41f6-9345-74d441059753\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw" Jan 29 13:18:20 crc kubenswrapper[4787]: W0129 13:18:20.628763 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d35f531_65d6_4292_96f9_c5b3d4e31982.slice/crio-2c8abdaf1a07afa300fd48697cbde7cb252c579cb6ab991fc91cb285e6f2e1a0 WatchSource:0}: Error finding container 2c8abdaf1a07afa300fd48697cbde7cb252c579cb6ab991fc91cb285e6f2e1a0: Status 404 returned error can't find the container with id 2c8abdaf1a07afa300fd48697cbde7cb252c579cb6ab991fc91cb285e6f2e1a0 Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.649032 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdv57\" (UniqueName: \"kubernetes.io/projected/d1e07830-c1c1-4fb9-8df0-019bebe9b06e-kube-api-access-mdv57\") pod \"etcd-operator-b45778765-k5sqz\" (UID: \"d1e07830-c1c1-4fb9-8df0-019bebe9b06e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.671324 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhmst\" (UniqueName: \"kubernetes.io/projected/7f5a1327-b7cf-453c-88bb-7e890ad5340e-kube-api-access-zhmst\") pod \"console-operator-58897d9998-wcqwh\" (UID: 
\"7f5a1327-b7cf-453c-88bb-7e890ad5340e\") " pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.684075 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.688346 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkgsz\" (UniqueName: \"kubernetes.io/projected/65ea8869-3303-4888-bae4-3be58d5097b9-kube-api-access-kkgsz\") pod \"apiserver-7bbb656c7d-2qqb8\" (UID: \"65ea8869-3303-4888-bae4-3be58d5097b9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.696104 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.706763 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.726006 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7"] Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.745842 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.757704 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l"] Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.761869 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.766786 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-metrics-certs\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.766835 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b4fbf99-cd24-4516-a01e-e1dbbadc8c72-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fbnq6\" (UID: \"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.766907 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chlzk\" (UniqueName: \"kubernetes.io/projected/5b4fbf99-cd24-4516-a01e-e1dbbadc8c72-kube-api-access-chlzk\") pod \"cluster-samples-operator-665b6dd947-fbnq6\" (UID: \"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.766936 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adc1fad4-a3ae-4171-9f63-1df2446d1938-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.767072 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/945c6d0d-6e91-4805-937d-401bd0742688-ca-trust-extracted\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.767126 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-trusted-ca\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.767161 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-registry-tls\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.767188 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ld5l\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-kube-api-access-8ld5l\") pod \"image-registry-697d97f7c8-fq9qf\" 
(UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.767233 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-879vd\" (UniqueName: \"kubernetes.io/projected/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-kube-api-access-879vd\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.767314 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.767351 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.767589 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-bound-sa-token\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.770332 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-stats-auth\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: E0129 13:18:20.770695 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.270675165 +0000 UTC m=+140.031935511 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.770735 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fa35d5d2-7890-4427-ae66-95a23c3c62fd-metrics-tls\") pod \"dns-operator-744455d44c-f5b6g\" (UID: \"fa35d5d2-7890-4427-ae66-95a23c3c62fd\") " pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.770841 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-default-certificate\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.770905 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.770969 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-962h4\" (UniqueName: \"kubernetes.io/projected/9e81570f-d024-4efc-a6cb-5c6c37338623-kube-api-access-962h4\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771003 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adc1fad4-a3ae-4171-9f63-1df2446d1938-proxy-tls\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771026 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk8hs\" (UniqueName: \"kubernetes.io/projected/adc1fad4-a3ae-4171-9f63-1df2446d1938-kube-api-access-zk8hs\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771152 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-registry-certificates\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771244 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43247c40-09cc-41f7-93f8-3d169c219e1b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: \"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771444 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9e81570f-d024-4efc-a6cb-5c6c37338623-service-ca-bundle\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771535 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43247c40-09cc-41f7-93f8-3d169c219e1b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: \"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771567 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/945c6d0d-6e91-4805-937d-401bd0742688-installation-pull-secrets\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771609 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43247c40-09cc-41f7-93f8-3d169c219e1b-config\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: \"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.771629 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2ghd\" (UniqueName: \"kubernetes.io/projected/fa35d5d2-7890-4427-ae66-95a23c3c62fd-kube-api-access-k2ghd\") pod \"dns-operator-744455d44c-f5b6g\" (UID: \"fa35d5d2-7890-4427-ae66-95a23c3c62fd\") " pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.775220 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.779477 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.786834 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" Jan 29 13:18:20 crc kubenswrapper[4787]: W0129 13:18:20.788858 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25ca0fe4_3a3d_4fcf_ad32_c531e8e5a598.slice/crio-47a32b58d4135bfc84b3fab368c01853f175b8dd34cc59a6a4a3807a23925ff9 WatchSource:0}: Error finding container 47a32b58d4135bfc84b3fab368c01853f175b8dd34cc59a6a4a3807a23925ff9: Status 404 returned error can't find the container with id 47a32b58d4135bfc84b3fab368c01853f175b8dd34cc59a6a4a3807a23925ff9 Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.793840 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.825016 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.829229 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5v5vz"] Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.852961 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.871985 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb"] Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.872883 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873192 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2ghd\" (UniqueName: \"kubernetes.io/projected/fa35d5d2-7890-4427-ae66-95a23c3c62fd-kube-api-access-k2ghd\") pod \"dns-operator-744455d44c-f5b6g\" (UID: \"fa35d5d2-7890-4427-ae66-95a23c3c62fd\") " pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873272 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bv25\" (UniqueName: \"kubernetes.io/projected/032eaa5a-856b-4af2-82cf-a9dbd690af10-kube-api-access-4bv25\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873332 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/235b8550-a08d-49cf-afcd-185b307d4db9-metrics-tls\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873356 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s74cx\" (UniqueName: 
\"kubernetes.io/projected/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-kube-api-access-s74cx\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873380 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r54p8\" (UniqueName: \"kubernetes.io/projected/c7488ecc-b4f0-44df-a9e2-fe778188b4e0-kube-api-access-r54p8\") pod \"ingress-canary-hwc5r\" (UID: \"c7488ecc-b4f0-44df-a9e2-fe778188b4e0\") " pod="openshift-ingress-canary/ingress-canary-hwc5r" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873402 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-metrics-certs\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873479 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b4fbf99-cd24-4516-a01e-e1dbbadc8c72-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fbnq6\" (UID: \"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873564 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chlzk\" (UniqueName: \"kubernetes.io/projected/5b4fbf99-cd24-4516-a01e-e1dbbadc8c72-kube-api-access-chlzk\") pod \"cluster-samples-operator-665b6dd947-fbnq6\" (UID: \"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873591 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adc1fad4-a3ae-4171-9f63-1df2446d1938-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873684 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/945c6d0d-6e91-4805-937d-401bd0742688-ca-trust-extracted\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873729 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-trusted-ca\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873756 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ld5l\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-kube-api-access-8ld5l\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: 
\"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873818 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-registry-tls\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.873845 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-879vd\" (UniqueName: \"kubernetes.io/projected/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-kube-api-access-879vd\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: E0129 13:18:20.875096 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.375060118 +0000 UTC m=+140.136320394 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876050 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876114 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876141 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/032eaa5a-856b-4af2-82cf-a9dbd690af10-tmpfs\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876164 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/0d5441d0-8b0c-47e5-b255-1e6a174e0460-node-bootstrap-token\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " 
pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876231 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876270 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/0d5441d0-8b0c-47e5-b255-1e6a174e0460-certs\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876293 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-bound-sa-token\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876315 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-csi-data-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876356 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f19a654-ad05-415e-a288-2b3d7733bb00-config\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876390 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a190e8a-8a77-4af5-84ae-83976f1f88d1-secret-volume\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876652 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f19a654-ad05-415e-a288-2b3d7733bb00-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876752 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-stats-auth\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876803 4787 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4j9pw\" (UniqueName: \"kubernetes.io/projected/235b8550-a08d-49cf-afcd-185b307d4db9-kube-api-access-4j9pw\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876827 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmbx5\" (UniqueName: \"kubernetes.io/projected/ceef637f-9cff-4fce-95d5-7174181e363d-kube-api-access-tmbx5\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876851 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsjmj\" (UniqueName: \"kubernetes.io/projected/c146688a-141a-4102-8b8e-e496685e241f-kube-api-access-rsjmj\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876902 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fa35d5d2-7890-4427-ae66-95a23c3c62fd-metrics-tls\") pod \"dns-operator-744455d44c-f5b6g\" (UID: \"fa35d5d2-7890-4427-ae66-95a23c3c62fd\") " pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876960 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-default-certificate\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.876997 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877040 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/032eaa5a-856b-4af2-82cf-a9dbd690af10-webhook-cert\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877103 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-962h4\" (UniqueName: \"kubernetes.io/projected/9e81570f-d024-4efc-a6cb-5c6c37338623-kube-api-access-962h4\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877197 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" 
(UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-socket-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877228 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-mountpoint-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877348 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adc1fad4-a3ae-4171-9f63-1df2446d1938-proxy-tls\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877383 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk8hs\" (UniqueName: \"kubernetes.io/projected/adc1fad4-a3ae-4171-9f63-1df2446d1938-kube-api-access-zk8hs\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877410 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a190e8a-8a77-4af5-84ae-83976f1f88d1-config-volume\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877534 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8f19a654-ad05-415e-a288-2b3d7733bb00-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877568 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c7488ecc-b4f0-44df-a9e2-fe778188b4e0-cert\") pod \"ingress-canary-hwc5r\" (UID: \"c7488ecc-b4f0-44df-a9e2-fe778188b4e0\") " pod="openshift-ingress-canary/ingress-canary-hwc5r" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877647 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-registry-certificates\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877777 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43247c40-09cc-41f7-93f8-3d169c219e1b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: 
\"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877810 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-plugins-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877900 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/032eaa5a-856b-4af2-82cf-a9dbd690af10-apiservice-cert\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877934 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c146688a-141a-4102-8b8e-e496685e241f-profile-collector-cert\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.877975 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-registration-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878033 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qr42\" (UniqueName: \"kubernetes.io/projected/8a190e8a-8a77-4af5-84ae-83976f1f88d1-kube-api-access-5qr42\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878198 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878304 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9e81570f-d024-4efc-a6cb-5c6c37338623-service-ca-bundle\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878381 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c146688a-141a-4102-8b8e-e496685e241f-srv-cert\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878423 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/235b8550-a08d-49cf-afcd-185b307d4db9-config-volume\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878445 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5stg\" (UniqueName: \"kubernetes.io/projected/0d5441d0-8b0c-47e5-b255-1e6a174e0460-kube-api-access-m5stg\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878567 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43247c40-09cc-41f7-93f8-3d169c219e1b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: \"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878602 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/945c6d0d-6e91-4805-937d-401bd0742688-installation-pull-secrets\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.878659 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43247c40-09cc-41f7-93f8-3d169c219e1b-config\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: \"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.880307 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43247c40-09cc-41f7-93f8-3d169c219e1b-config\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: \"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.881952 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adc1fad4-a3ae-4171-9f63-1df2446d1938-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:20 crc kubenswrapper[4787]: E0129 13:18:20.882745 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.382689317 +0000 UTC m=+140.143949593 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.883411 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/945c6d0d-6e91-4805-937d-401bd0742688-ca-trust-extracted\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.883849 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-trusted-ca\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.887021 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-registry-certificates\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.888970 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.889218 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43247c40-09cc-41f7-93f8-3d169c219e1b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: \"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.890401 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9e81570f-d024-4efc-a6cb-5c6c37338623-service-ca-bundle\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.893344 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adc1fad4-a3ae-4171-9f63-1df2446d1938-proxy-tls\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.893665 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" 
(UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-registry-tls\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.894031 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b4fbf99-cd24-4516-a01e-e1dbbadc8c72-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-fbnq6\" (UID: \"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.894576 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-metrics-certs\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.895257 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/945c6d0d-6e91-4805-937d-401bd0742688-installation-pull-secrets\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.897882 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.899751 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fa35d5d2-7890-4427-ae66-95a23c3c62fd-metrics-tls\") pod \"dns-operator-744455d44c-f5b6g\" (UID: \"fa35d5d2-7890-4427-ae66-95a23c3c62fd\") " pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.899944 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-n8d4v"] Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.938552 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk" event={"ID":"42992ca9-fe81-4299-bfa5-30e38dd9f127","Type":"ContainerStarted","Data":"6df8f303b8d4700ec37724481977538da4fad255083ca5eddadb294360fa960e"} Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.937499 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2ghd\" (UniqueName: \"kubernetes.io/projected/fa35d5d2-7890-4427-ae66-95a23c3c62fd-kube-api-access-k2ghd\") pod \"dns-operator-744455d44c-f5b6g\" (UID: \"fa35d5d2-7890-4427-ae66-95a23c3c62fd\") " pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.947810 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7" 
event={"ID":"24afb0d7-3f0e-479d-85dd-5aaf43928dec","Type":"ContainerStarted","Data":"8bf78229ea59117e1723f76e1c0dceee96770b2e3ead274be2e5ca9d88faa2b8"} Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.961167 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chlzk\" (UniqueName: \"kubernetes.io/projected/5b4fbf99-cd24-4516-a01e-e1dbbadc8c72-kube-api-access-chlzk\") pod \"cluster-samples-operator-665b6dd947-fbnq6\" (UID: \"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.961501 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" event={"ID":"24a38653-de36-438f-a9d7-fde6f094004f","Type":"ContainerStarted","Data":"380e7f77ff5d33b919ee64ff944cf5bc45fa5a2aae046d19f148f1fd89ab1820"} Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.966960 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" event={"ID":"0d35f531-65d6-4292-96f9-c5b3d4e31982","Type":"ContainerStarted","Data":"2c8abdaf1a07afa300fd48697cbde7cb252c579cb6ab991fc91cb285e6f2e1a0"} Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.971075 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" event={"ID":"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598","Type":"ContainerStarted","Data":"47a32b58d4135bfc84b3fab368c01853f175b8dd34cc59a6a4a3807a23925ff9"} Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.971640 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ld5l\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-kube-api-access-8ld5l\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.979914 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5v5vz" event={"ID":"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af","Type":"ContainerStarted","Data":"c438728e97f9fd4e3c2344d24f41a82440cffcd2fd54b0695954f7d96acf119d"} Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.980673 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981071 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4j9pw\" (UniqueName: \"kubernetes.io/projected/235b8550-a08d-49cf-afcd-185b307d4db9-kube-api-access-4j9pw\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981100 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmbx5\" (UniqueName: \"kubernetes.io/projected/ceef637f-9cff-4fce-95d5-7174181e363d-kube-api-access-tmbx5\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981129 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsjmj\" (UniqueName: \"kubernetes.io/projected/c146688a-141a-4102-8b8e-e496685e241f-kube-api-access-rsjmj\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:20 crc kubenswrapper[4787]: E0129 13:18:20.981318 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.481290261 +0000 UTC m=+140.242550547 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981352 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/032eaa5a-856b-4af2-82cf-a9dbd690af10-webhook-cert\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981399 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-socket-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981422 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-mountpoint-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981507 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a190e8a-8a77-4af5-84ae-83976f1f88d1-config-volume\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981534 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8f19a654-ad05-415e-a288-2b3d7733bb00-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981561 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" 
(UniqueName: \"kubernetes.io/secret/c7488ecc-b4f0-44df-a9e2-fe778188b4e0-cert\") pod \"ingress-canary-hwc5r\" (UID: \"c7488ecc-b4f0-44df-a9e2-fe778188b4e0\") " pod="openshift-ingress-canary/ingress-canary-hwc5r" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981594 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-plugins-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981599 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-mountpoint-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.981619 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c146688a-141a-4102-8b8e-e496685e241f-profile-collector-cert\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982106 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/032eaa5a-856b-4af2-82cf-a9dbd690af10-apiservice-cert\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982165 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-registration-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982200 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qr42\" (UniqueName: \"kubernetes.io/projected/8a190e8a-8a77-4af5-84ae-83976f1f88d1-kube-api-access-5qr42\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982271 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982334 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c146688a-141a-4102-8b8e-e496685e241f-srv-cert\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:20 crc 
kubenswrapper[4787]: I0129 13:18:20.982365 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/235b8550-a08d-49cf-afcd-185b307d4db9-config-volume\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982393 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5stg\" (UniqueName: \"kubernetes.io/projected/0d5441d0-8b0c-47e5-b255-1e6a174e0460-kube-api-access-m5stg\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982429 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-socket-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982498 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bv25\" (UniqueName: \"kubernetes.io/projected/032eaa5a-856b-4af2-82cf-a9dbd690af10-kube-api-access-4bv25\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982779 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/235b8550-a08d-49cf-afcd-185b307d4db9-metrics-tls\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982817 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s74cx\" (UniqueName: \"kubernetes.io/projected/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-kube-api-access-s74cx\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982843 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r54p8\" (UniqueName: \"kubernetes.io/projected/c7488ecc-b4f0-44df-a9e2-fe778188b4e0-kube-api-access-r54p8\") pod \"ingress-canary-hwc5r\" (UID: \"c7488ecc-b4f0-44df-a9e2-fe778188b4e0\") " pod="openshift-ingress-canary/ingress-canary-hwc5r" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982846 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-registration-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982924 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982959 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.982985 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/032eaa5a-856b-4af2-82cf-a9dbd690af10-tmpfs\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.983019 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/0d5441d0-8b0c-47e5-b255-1e6a174e0460-node-bootstrap-token\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.983048 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/0d5441d0-8b0c-47e5-b255-1e6a174e0460-certs\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.983076 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-csi-data-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.983099 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f19a654-ad05-415e-a288-2b3d7733bb00-config\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.983121 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a190e8a-8a77-4af5-84ae-83976f1f88d1-secret-volume\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.983150 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f19a654-ad05-415e-a288-2b3d7733bb00-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.983579 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: 
\"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-plugins-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.984145 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/032eaa5a-856b-4af2-82cf-a9dbd690af10-tmpfs\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.984745 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.984986 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a190e8a-8a77-4af5-84ae-83976f1f88d1-config-volume\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:20 crc kubenswrapper[4787]: E0129 13:18:20.985232 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.485212029 +0000 UTC m=+140.246472305 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.989178 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-879vd\" (UniqueName: \"kubernetes.io/projected/9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc-kube-api-access-879vd\") pod \"kube-storage-version-migrator-operator-b67b599dd-467gl\" (UID: \"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.989595 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f19a654-ad05-415e-a288-2b3d7733bb00-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.989689 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-csi-data-dir\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.990749 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/0d5441d0-8b0c-47e5-b255-1e6a174e0460-certs\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.991204 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.991478 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/235b8550-a08d-49cf-afcd-185b307d4db9-config-volume\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.987439 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f19a654-ad05-415e-a288-2b3d7733bb00-config\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.993580 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/c146688a-141a-4102-8b8e-e496685e241f-srv-cert\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.994525 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" event={"ID":"a7214c59-d1ca-457b-adb6-12072f3793f1","Type":"ContainerStarted","Data":"8a0dae5c9e6aa311e967d231924238c7b1bbcff3d82adecbf40a99ba6299d816"} Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.994563 4787 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-4whcq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 29 13:18:20 crc kubenswrapper[4787]: I0129 13:18:20.994604 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:20.995789 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/032eaa5a-856b-4af2-82cf-a9dbd690af10-webhook-cert\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:20.995938 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/c146688a-141a-4102-8b8e-e496685e241f-profile-collector-cert\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:20.996507 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/032eaa5a-856b-4af2-82cf-a9dbd690af10-apiservice-cert\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.002970 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/235b8550-a08d-49cf-afcd-185b307d4db9-metrics-tls\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.003740 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a190e8a-8a77-4af5-84ae-83976f1f88d1-secret-volume\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.003920 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/0d5441d0-8b0c-47e5-b255-1e6a174e0460-node-bootstrap-token\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.007318 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-bound-sa-token\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.007339 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.017150 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43247c40-09cc-41f7-93f8-3d169c219e1b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-7bxnt\" (UID: \"43247c40-09cc-41f7-93f8-3d169c219e1b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.027489 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.041899 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.048424 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-stats-auth\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.054009 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk8hs\" (UniqueName: \"kubernetes.io/projected/adc1fad4-a3ae-4171-9f63-1df2446d1938-kube-api-access-zk8hs\") pod \"machine-config-controller-84d6567774-4p5mp\" (UID: \"adc1fad4-a3ae-4171-9f63-1df2446d1938\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.054253 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.056475 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9e81570f-d024-4efc-a6cb-5c6c37338623-default-certificate\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.065181 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c7488ecc-b4f0-44df-a9e2-fe778188b4e0-cert\") pod \"ingress-canary-hwc5r\" (UID: \"c7488ecc-b4f0-44df-a9e2-fe778188b4e0\") " pod="openshift-ingress-canary/ingress-canary-hwc5r" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.071245 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-962h4\" (UniqueName: \"kubernetes.io/projected/9e81570f-d024-4efc-a6cb-5c6c37338623-kube-api-access-962h4\") pod \"router-default-5444994796-tvvl4\" (UID: \"9e81570f-d024-4efc-a6cb-5c6c37338623\") " pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.083855 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.087371 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.587341808 +0000 UTC m=+140.348602084 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.101725 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.160814 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsjmj\" (UniqueName: \"kubernetes.io/projected/c146688a-141a-4102-8b8e-e496685e241f-kube-api-access-rsjmj\") pod \"catalog-operator-68c6474976-57vst\" (UID: \"c146688a-141a-4102-8b8e-e496685e241f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.162288 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4j9pw\" (UniqueName: \"kubernetes.io/projected/235b8550-a08d-49cf-afcd-185b307d4db9-kube-api-access-4j9pw\") pod \"dns-default-pgssf\" (UID: \"235b8550-a08d-49cf-afcd-185b307d4db9\") " pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.168115 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8f19a654-ad05-415e-a288-2b3d7733bb00-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-wvsdh\" (UID: \"8f19a654-ad05-415e-a288-2b3d7733bb00\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.170590 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.178093 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.181917 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.183974 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wcqwh"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.186601 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bv25\" (UniqueName: \"kubernetes.io/projected/032eaa5a-856b-4af2-82cf-a9dbd690af10-kube-api-access-4bv25\") pod \"packageserver-d55dfcdfc-wnrc8\" (UID: \"032eaa5a-856b-4af2-82cf-a9dbd690af10\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.187348 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.187824 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.687810193 +0000 UTC m=+140.449070469 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.195988 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.205311 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.221810 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5stg\" (UniqueName: \"kubernetes.io/projected/0d5441d0-8b0c-47e5-b255-1e6a174e0460-kube-api-access-m5stg\") pod \"machine-config-server-wx8br\" (UID: \"0d5441d0-8b0c-47e5-b255-1e6a174e0460\") " pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.225878 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qr42\" (UniqueName: \"kubernetes.io/projected/8a190e8a-8a77-4af5-84ae-83976f1f88d1-kube-api-access-5qr42\") pod \"collect-profiles-29494875-ln42k\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.247195 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s74cx\" (UniqueName: \"kubernetes.io/projected/7d1c9ddf-15c9-48b5-8a65-1ce9805585f9-kube-api-access-s74cx\") pod \"csi-hostpathplugin-jxhlz\" (UID: \"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9\") " pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.252138 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.252857 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.257040 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-wx8br" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.260514 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r54p8\" (UniqueName: \"kubernetes.io/projected/c7488ecc-b4f0-44df-a9e2-fe778188b4e0-kube-api-access-r54p8\") pod \"ingress-canary-hwc5r\" (UID: \"c7488ecc-b4f0-44df-a9e2-fe778188b4e0\") " pod="openshift-ingress-canary/ingress-canary-hwc5r" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.272656 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmbx5\" (UniqueName: \"kubernetes.io/projected/ceef637f-9cff-4fce-95d5-7174181e363d-kube-api-access-tmbx5\") pod \"marketplace-operator-79b997595-fxcnw\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.290299 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.290554 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.790505431 +0000 UTC m=+140.551765707 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.290739 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.291087 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.791071979 +0000 UTC m=+140.552332255 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: W0129 13:18:21.297056 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f143c5b_cead_45c8_8c25_3b259d28a6b4.slice/crio-a6f75b51bd4e0154196e3ad551389cd0bdd8129c7ce812a2af8c8fa7a0ab7daa WatchSource:0}: Error finding container a6f75b51bd4e0154196e3ad551389cd0bdd8129c7ce812a2af8c8fa7a0ab7daa: Status 404 returned error can't find the container with id a6f75b51bd4e0154196e3ad551389cd0bdd8129c7ce812a2af8c8fa7a0ab7daa Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.301499 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8"] Jan 29 13:18:21 crc kubenswrapper[4787]: W0129 13:18:21.310620 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f66be6f_c8e3_42f5_ba6c_ef8a9b3de8ea.slice/crio-495afc341f52fc4dd9d8af4a38c9da3488b343adf86ee74333353d17fbb71142 WatchSource:0}: Error finding container 495afc341f52fc4dd9d8af4a38c9da3488b343adf86ee74333353d17fbb71142: Status 404 returned error can't find the container with id 495afc341f52fc4dd9d8af4a38c9da3488b343adf86ee74333353d17fbb71142 Jan 29 13:18:21 crc kubenswrapper[4787]: W0129 13:18:21.320495 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb366d44d_d212_4aae_8d1f_ecd5cc7083e1.slice/crio-0a7b0c71ca6e81ee528f3458fd8df75438e59b6aa30b82228f21b8a01e2928eb WatchSource:0}: Error finding container 0a7b0c71ca6e81ee528f3458fd8df75438e59b6aa30b82228f21b8a01e2928eb: Status 404 returned error can't find the container with id 0a7b0c71ca6e81ee528f3458fd8df75438e59b6aa30b82228f21b8a01e2928eb Jan 29 13:18:21 crc kubenswrapper[4787]: 
I0129 13:18:21.324828 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.391416 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.392773 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.892748644 +0000 UTC m=+140.654008920 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.409416 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.468895 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.492005 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.493562 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.493907 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:21.993892611 +0000 UTC m=+140.755152887 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.531297 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.532716 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.532769 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.544171 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-hwc5r" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.546376 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-k5sqz"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.592839 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.594536 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.595072 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.095039478 +0000 UTC m=+140.856299754 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.596684 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.598000 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ld62n"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.698444 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.699413 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.199369509 +0000 UTC m=+140.960629785 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.700824 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.801149 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.802303 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.302273714 +0000 UTC m=+141.063533990 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: W0129 13:18:21.802409 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d5441d0_8b0c_47e5_b255_1e6a174e0460.slice/crio-6f1d0f0c26151be59f1336fe1914527a70ce0ee5d77aa5ef7b45b476d453514e WatchSource:0}: Error finding container 6f1d0f0c26151be59f1336fe1914527a70ce0ee5d77aa5ef7b45b476d453514e: Status 404 returned error can't find the container with id 6f1d0f0c26151be59f1336fe1914527a70ce0ee5d77aa5ef7b45b476d453514e Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.889618 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zdpq4"] Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.890324 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" podStartSLOduration=116.890266902 podStartE2EDuration="1m56.890266902s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:21.885217348 +0000 UTC m=+140.646477624" watchObservedRunningTime="2026-01-29 13:18:21.890266902 +0000 UTC m=+140.651527178" Jan 29 13:18:21 crc kubenswrapper[4787]: I0129 13:18:21.905221 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:21 crc kubenswrapper[4787]: E0129 13:18:21.905691 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.405675255 +0000 UTC m=+141.166935531 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:21 crc kubenswrapper[4787]: W0129 13:18:21.906916 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb069d68a_0c0d_472e_92e9_c2b2b84f11b8.slice/crio-4e3af867b69e0f898fdd0eac477d3b6e4daf7b3bc8cedf5a9c4b811157ac11ba WatchSource:0}: Error finding container 4e3af867b69e0f898fdd0eac477d3b6e4daf7b3bc8cedf5a9c4b811157ac11ba: Status 404 returned error can't find the container with id 4e3af867b69e0f898fdd0eac477d3b6e4daf7b3bc8cedf5a9c4b811157ac11ba Jan 29 13:18:21 crc kubenswrapper[4787]: W0129 13:18:21.952075 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9604be66_c8c5_4ed4_97b9_15648be60d67.slice/crio-40812351574d1d891b673668bda863a75ce9059ad1a8f6f892e97616c01c86ab WatchSource:0}: Error finding container 40812351574d1d891b673668bda863a75ce9059ad1a8f6f892e97616c01c86ab: Status 404 returned error can't find the container with id 40812351574d1d891b673668bda863a75ce9059ad1a8f6f892e97616c01c86ab Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.006905 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.007176 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.507122062 +0000 UTC m=+141.268382338 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.007262 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.007730 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-29 13:18:22.507712191 +0000 UTC m=+141.268972467 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.015706 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" event={"ID":"b069d68a-0c0d-472e-92e9-c2b2b84f11b8","Type":"ContainerStarted","Data":"4e3af867b69e0f898fdd0eac477d3b6e4daf7b3bc8cedf5a9c4b811157ac11ba"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.023890 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" event={"ID":"b366d44d-d212-4aae-8d1f-ecd5cc7083e1","Type":"ContainerStarted","Data":"0a7b0c71ca6e81ee528f3458fd8df75438e59b6aa30b82228f21b8a01e2928eb"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.026481 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk" event={"ID":"42992ca9-fe81-4299-bfa5-30e38dd9f127","Type":"ContainerStarted","Data":"ad572e520d2f4c5a3187dce016404c06531421ddfc769572186657186b6c5b88"} Jan 29 13:18:22 crc kubenswrapper[4787]: W0129 13:18:22.028711 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e81570f_d024_4efc_a6cb_5c6c37338623.slice/crio-616e3d8e694932b413aa7285cce38e25d86ffed1976d7529552e90eed6d393b0 WatchSource:0}: Error finding container 616e3d8e694932b413aa7285cce38e25d86ffed1976d7529552e90eed6d393b0: Status 404 returned error can't find the container with id 616e3d8e694932b413aa7285cce38e25d86ffed1976d7529552e90eed6d393b0 Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.040531 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7" event={"ID":"24afb0d7-3f0e-479d-85dd-5aaf43928dec","Type":"ContainerStarted","Data":"b08b8d04fc752724fce7dd1f15697f0b233d573bc33e9f3d6ee08ce8effb6cdc"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.062930 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-wx8br" event={"ID":"0d5441d0-8b0c-47e5-b255-1e6a174e0460","Type":"ContainerStarted","Data":"6f1d0f0c26151be59f1336fe1914527a70ce0ee5d77aa5ef7b45b476d453514e"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.108332 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.110064 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 13:18:22.610037887 +0000 UTC m=+141.371298173 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.121133 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" event={"ID":"a7214c59-d1ca-457b-adb6-12072f3793f1","Type":"ContainerStarted","Data":"e5e0bfaa0702985365f8a560a0547c9b905b0adf46e10014589d825d1583409d"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.132715 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-f5b6g"] Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.133261 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" podStartSLOduration=117.133235213 podStartE2EDuration="1m57.133235213s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:22.119849216 +0000 UTC m=+140.881109492" watchObservedRunningTime="2026-01-29 13:18:22.133235213 +0000 UTC m=+140.894495489" Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.147299 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-n8d4v" event={"ID":"f4fd6365-d36d-4da8-8722-c4a542dae2eb","Type":"ContainerStarted","Data":"5a7943d335cb0bd8ec2bc94c7c48c5510b22231a127a376125e1e6dadc47a52c"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.148571 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.152697 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" event={"ID":"62dbfcbd-5249-4d13-9166-e9762e83f252","Type":"ContainerStarted","Data":"04bcb2b27afaba761fd4ddaf5e5bf499b55db860354e61d70f9a1e5d7fbc11ca"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.157143 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" event={"ID":"25ca0fe4-3a3d-4fcf-ad32-c531e8e5a598","Type":"ContainerStarted","Data":"4d70e6cc461d1cbb4f2aca12d1070e46cfc10c748f948b5622a68d42f33a1b5f"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.167261 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.167331 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 
10.217.0.12:8080: connect: connection refused" Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.195417 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp"] Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.202160 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" event={"ID":"65ea8869-3303-4888-bae4-3be58d5097b9","Type":"ContainerStarted","Data":"558794d702f90a0ace05cd817b4f66174a3ec84b53f774526e1197b695de0661"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.206878 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" event={"ID":"9604be66-c8c5-4ed4-97b9-15648be60d67","Type":"ContainerStarted","Data":"40812351574d1d891b673668bda863a75ce9059ad1a8f6f892e97616c01c86ab"} Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.208429 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt"] Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.211329 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.212331 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.712316191 +0000 UTC m=+141.473576467 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.213250 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6"]
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.226341 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" event={"ID":"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea","Type":"ContainerStarted","Data":"495afc341f52fc4dd9d8af4a38c9da3488b343adf86ee74333353d17fbb71142"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.230035 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw" event={"ID":"2f88bb10-54c4-41f6-9345-74d441059753","Type":"ContainerStarted","Data":"529747d76058c85da34bfa8edb0f08e0ac1f7ba2778ef126c0454d4f30aec717"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.257507 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw" event={"ID":"1f143c5b-cead-45c8-8c25-3b259d28a6b4","Type":"ContainerStarted","Data":"a6f75b51bd4e0154196e3ad551389cd0bdd8129c7ce812a2af8c8fa7a0ab7daa"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.271958 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9" event={"ID":"298b71af-3617-4775-bb90-1b62201b557f","Type":"ContainerStarted","Data":"86be814b997068ec55587f5fd0b4646a8e92cdc0fea3f5f4f3a2312408f80beb"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.280930 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" event={"ID":"24a38653-de36-438f-a9d7-fde6f094004f","Type":"ContainerStarted","Data":"4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.282353 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.292240 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-vlzsj" podStartSLOduration=117.292211815 podStartE2EDuration="1m57.292211815s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:22.238591217 +0000 UTC m=+140.999851503" watchObservedRunningTime="2026-01-29 13:18:22.292211815 +0000 UTC m=+141.053472091"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.301232 4787 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qg2fk container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.19:6443/healthz\": dial tcp 10.217.0.19:6443: connect: connection refused" start-of-body=
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.301437 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" podUID="24a38653-de36-438f-a9d7-fde6f094004f" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.19:6443/healthz\": dial tcp 10.217.0.19:6443: connect: connection refused"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.312334 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.313822 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.813788039 +0000 UTC m=+141.575048315 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.320141 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.323619 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.823604049 +0000 UTC m=+141.584864325 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.336801 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8"]
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.349985 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" event={"ID":"d1e07830-c1c1-4fb9-8df0-019bebe9b06e","Type":"ContainerStarted","Data":"f99e880f37a8da7e540a653baa72c7267c3d7610d933ad923e60e13689debce3"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.365877 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" event={"ID":"5667f919-7345-4afa-b7bb-a89f26885147","Type":"ContainerStarted","Data":"979710cfa8842d69001133f4f15a0a43802070d40bf0b4a54cb3164fabf3054c"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.373054 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" event={"ID":"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10","Type":"ContainerStarted","Data":"a707f94b823f93f0b356f8cf24718c6fe1054a8970d4bae9f3a81a3f7822e92f"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.382815 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" event={"ID":"1d559b4e-9262-4ada-8539-62bd08f3dfe4","Type":"ContainerStarted","Data":"351059bc9c3bcf317d5f234e2eead40e8198225229152dce1f4f01d409d4f119"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.388920 4787 generic.go:334] "Generic (PLEG): container finished" podID="0d35f531-65d6-4292-96f9-c5b3d4e31982" containerID="f42a279e1ed3c53387cc0eedf6449af02fb6a5bcbb911fce4e3db83bf815ae39" exitCode=0
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.389790 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" event={"ID":"0d35f531-65d6-4292-96f9-c5b3d4e31982","Type":"ContainerDied","Data":"f42a279e1ed3c53387cc0eedf6449af02fb6a5bcbb911fce4e3db83bf815ae39"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.412562 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" event={"ID":"7f5a1327-b7cf-453c-88bb-7e890ad5340e","Type":"ContainerStarted","Data":"b8097f4f9224357161f2f9d34264a56f5f51682c14464d9f2ca12f9ed908dd4f"}
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.412780 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-wcqwh"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.422378 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.424638 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:22.924621252 +0000 UTC m=+141.685881528 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.425805 4787 patch_prober.go:28] interesting pod/console-operator-58897d9998-wcqwh container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.425874 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" podUID="7f5a1327-b7cf-453c-88bb-7e890ad5340e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.538930 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.541188 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.041169791 +0000 UTC m=+141.802430067 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.640467 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.648908 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.148839991 +0000 UTC m=+141.910100267 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.660007 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl"]
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.703624 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst"]
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.750154 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.750673 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.25065087 +0000 UTC m=+142.011911316 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.754343 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-pgssf"]
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.756620 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh"]
Jan 29 13:18:22 crc kubenswrapper[4787]: W0129 13:18:22.785904 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9add0ecc_afd4_4dd9_b7e4_c0ae9fe22afc.slice/crio-d3797dd4f38f47245560f09b0950ba6f623b2aa1b612df274964b63d434bfa56 WatchSource:0}: Error finding container d3797dd4f38f47245560f09b0950ba6f623b2aa1b612df274964b63d434bfa56: Status 404 returned error can't find the container with id d3797dd4f38f47245560f09b0950ba6f623b2aa1b612df274964b63d434bfa56
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.799536 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-qbzqd" podStartSLOduration=118.799510522 podStartE2EDuration="1m58.799510522s" podCreationTimestamp="2026-01-29 13:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:22.769663629 +0000 UTC m=+141.530923905" watchObservedRunningTime="2026-01-29 13:18:22.799510522 +0000 UTC m=+141.560770798"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.817383 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-hwc5r"]
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.854077 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.855268 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.355246269 +0000 UTC m=+142.116506545 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.866018 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" podStartSLOduration=118.86599645 podStartE2EDuration="1m58.86599645s" podCreationTimestamp="2026-01-29 13:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:22.865224185 +0000 UTC m=+141.626484461" watchObservedRunningTime="2026-01-29 13:18:22.86599645 +0000 UTC m=+141.627256726"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.896205 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" podStartSLOduration=117.896179364 podStartE2EDuration="1m57.896179364s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:22.895492301 +0000 UTC m=+141.656752577" watchObservedRunningTime="2026-01-29 13:18:22.896179364 +0000 UTC m=+141.657439670"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.937340 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fxcnw"]
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.951879 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" podStartSLOduration=117.951835728 podStartE2EDuration="1m57.951835728s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:22.939183636 +0000 UTC m=+141.700443942" watchObservedRunningTime="2026-01-29 13:18:22.951835728 +0000 UTC m=+141.713096004"
Jan 29 13:18:22 crc kubenswrapper[4787]: I0129 13:18:22.957074 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:22 crc kubenswrapper[4787]: E0129 13:18:22.957738 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.45771759 +0000 UTC m=+142.218977876 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.051729 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-n8d4v" podStartSLOduration=118.051703364 podStartE2EDuration="1m58.051703364s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:23.050333909 +0000 UTC m=+141.811594195" watchObservedRunningTime="2026-01-29 13:18:23.051703364 +0000 UTC m=+141.812963640"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.058900 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.065944 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jxhlz"]
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.066792 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.566755704 +0000 UTC m=+142.328015980 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.070023 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k"]
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.118727 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" podStartSLOduration=119.118661036 podStartE2EDuration="1m59.118661036s" podCreationTimestamp="2026-01-29 13:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:23.109117945 +0000 UTC m=+141.870378221" watchObservedRunningTime="2026-01-29 13:18:23.118661036 +0000 UTC m=+141.879921312"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.149606 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kc4v7" podStartSLOduration=119.149581744 podStartE2EDuration="1m59.149581744s" podCreationTimestamp="2026-01-29 13:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:23.147264639 +0000 UTC m=+141.908524915" watchObservedRunningTime="2026-01-29 13:18:23.149581744 +0000 UTC m=+141.910842020"
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.170579 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.670552208 +0000 UTC m=+142.431812484 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.170643 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.182271 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-w2r7l" podStartSLOduration=118.182246139 podStartE2EDuration="1m58.182246139s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:23.180376928 +0000 UTC m=+141.941637204" watchObservedRunningTime="2026-01-29 13:18:23.182246139 +0000 UTC m=+141.943506415"
Jan 29 13:18:23 crc kubenswrapper[4787]: W0129 13:18:23.183828 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7488ecc_b4f0_44df_a9e2_fe778188b4e0.slice/crio-65291495868e688af2c14088f1cbec5824089d4fda8460b5c8d5b4ea88f1ad11 WatchSource:0}: Error finding container 65291495868e688af2c14088f1cbec5824089d4fda8460b5c8d5b4ea88f1ad11: Status 404 returned error can't find the container with id 65291495868e688af2c14088f1cbec5824089d4fda8460b5c8d5b4ea88f1ad11
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.272095 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.272432 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.772410738 +0000 UTC m=+142.533671004 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.374191 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.379243 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.87920625 +0000 UTC m=+142.640466526 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.432916 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" event={"ID":"c146688a-141a-4102-8b8e-e496685e241f","Type":"ContainerStarted","Data":"98775edac6714272fe8839cca084244b2b5e224984d0c8800a3fc40c3d222e46"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.434071 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" event={"ID":"032eaa5a-856b-4af2-82cf-a9dbd690af10","Type":"ContainerStarted","Data":"67f9af1572f67b22c47ad0ddd29f7902cc69a59a9d338b13588f7ff5490ed646"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.437759 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" event={"ID":"b366d44d-d212-4aae-8d1f-ecd5cc7083e1","Type":"ContainerStarted","Data":"2c5218ef9655b6b6ca3fdecb0158df53abd99f95579649a8b98cfa57f3f944b9"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.438874 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" event={"ID":"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72","Type":"ContainerStarted","Data":"318c00ed3f40b294d1d0e11e98e6f0fead648e97e90e1af1134e2046654f82a1"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.445941 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" event={"ID":"b069d68a-0c0d-472e-92e9-c2b2b84f11b8","Type":"ContainerStarted","Data":"4757467be96c336648091a0c8c51bcb859694cce36a353f09d9ed9d26e256009"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.448933 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" event={"ID":"8a190e8a-8a77-4af5-84ae-83976f1f88d1","Type":"ContainerStarted","Data":"3b60ec040ad6c0b3223c8dafbc936b5d7bd12099187775a11bc8820bb700c33c"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.460021 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" event={"ID":"43247c40-09cc-41f7-93f8-3d169c219e1b","Type":"ContainerStarted","Data":"fe873c5be91784ff5badbbba5b1d11f75b3c22d7077cdb07c81ab5f6395f96a7"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.462644 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" event={"ID":"adc1fad4-a3ae-4171-9f63-1df2446d1938","Type":"ContainerStarted","Data":"97c64cbb56c5ca9129e1e21e003ef220c80da1d1d2f70d194a59670412327706"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.481095 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.481381 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.98135885 +0000 UTC m=+142.742619126 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.481919 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.482386 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:23.982377923 +0000 UTC m=+142.743638189 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.498320 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" event={"ID":"62dbfcbd-5249-4d13-9166-e9762e83f252","Type":"ContainerStarted","Data":"c7250209be27355b4fd66a0a31ccf454a04f1555e02f0b62420707510656a7af"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.526350 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw" event={"ID":"1f143c5b-cead-45c8-8c25-3b259d28a6b4","Type":"ContainerStarted","Data":"8a17bd459689763e086e562e0de91650ce66366b360f57e4e78754c62944be94"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.528712 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5v5vz" event={"ID":"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af","Type":"ContainerStarted","Data":"1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.547411 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" event={"ID":"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc","Type":"ContainerStarted","Data":"d3797dd4f38f47245560f09b0950ba6f623b2aa1b612df274964b63d434bfa56"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.549995 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-fl5rb" event={"ID":"1f44e738-ab48-48a0-a9d5-c3ed59b0bd10","Type":"ContainerStarted","Data":"9603bfd5db251ef211cdf846c42e37dd0d801c8d36a929f22d0b0e4fe57446cd"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.552358 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" event={"ID":"7f5a1327-b7cf-453c-88bb-7e890ad5340e","Type":"ContainerStarted","Data":"2929556fb7feee69021b4b10ca7dc1e6cb0cbca95aea8903a224d32017cf26f1"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.554189 4787 patch_prober.go:28] interesting pod/console-operator-58897d9998-wcqwh container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.554234 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" podUID="7f5a1327-b7cf-453c-88bb-7e890ad5340e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.554768 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" event={"ID":"fa35d5d2-7890-4427-ae66-95a23c3c62fd","Type":"ContainerStarted","Data":"ff3100b06af35ba6f32808a12b1716a3da12207b418906003772f3cb79de65d2"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.559089 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" event={"ID":"ceef637f-9cff-4fce-95d5-7174181e363d","Type":"ContainerStarted","Data":"dec56c7819c1fd6d01c98f5221532816600b0b67355a64bfc5156fddc0646bb9"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.569965 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-n8d4v" event={"ID":"f4fd6365-d36d-4da8-8722-c4a542dae2eb","Type":"ContainerStarted","Data":"f2d0566efe6c7342211238841e4914e43d6ad5c7a17daf2e976901af715b1eb6"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.570842 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.570892 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.574095 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-tvvl4" event={"ID":"9e81570f-d024-4efc-a6cb-5c6c37338623","Type":"ContainerStarted","Data":"616e3d8e694932b413aa7285cce38e25d86ffed1976d7529552e90eed6d393b0"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.581118 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" event={"ID":"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9","Type":"ContainerStarted","Data":"a24ae676d58dbc5c27cfb60cf7c62db5b660d28278989461f2986f75cf6efe3b"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.582731 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.582935 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.08290584 +0000 UTC m=+142.844166116 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.583384 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.583787 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.083779389 +0000 UTC m=+142.845039665 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.585833 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-hwc5r" event={"ID":"c7488ecc-b4f0-44df-a9e2-fe778188b4e0","Type":"ContainerStarted","Data":"65291495868e688af2c14088f1cbec5824089d4fda8460b5c8d5b4ea88f1ad11"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.592054 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-pgssf" event={"ID":"235b8550-a08d-49cf-afcd-185b307d4db9","Type":"ContainerStarted","Data":"1338425dd298ec47dfe61e4341f1c30f0800c5485ba5180ff1fbb60b869f399a"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.596770 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" event={"ID":"7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea","Type":"ContainerStarted","Data":"39cfe0277d86a873b7848c68381571191efe08869687325995497e02e47f9cc0"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.597976 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.602635 4787 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-5wvf8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body=
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.602707 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" podUID="7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.604797 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" event={"ID":"8f19a654-ad05-415e-a288-2b3d7733bb00","Type":"ContainerStarted","Data":"3f635885c2bc494c532e7cc3f573f840231220a5151fd03ed20bd099774cb6bd"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.611525 4787 generic.go:334] "Generic (PLEG): container finished" podID="65ea8869-3303-4888-bae4-3be58d5097b9" containerID="b0a4ed2091576ea63dc411140c02677ca796842d965f18663796d46ea93032cf" exitCode=0
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.611684 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" event={"ID":"65ea8869-3303-4888-bae4-3be58d5097b9","Type":"ContainerDied","Data":"b0a4ed2091576ea63dc411140c02677ca796842d965f18663796d46ea93032cf"}
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.613468 4787 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-qg2fk container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.19:6443/healthz\": dial tcp 10.217.0.19:6443: connect: connection refused" start-of-body=
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.613519 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" podUID="24a38653-de36-438f-a9d7-fde6f094004f" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.19:6443/healthz\": dial tcp 10.217.0.19:6443: connect: connection refused"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.687060 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.687892 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.187780859 +0000 UTC m=+142.949041135 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.688320 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.692759 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.192739721 +0000 UTC m=+142.953999997 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.789658 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.793327 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.289788894 +0000 UTC m=+143.051049170 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.793785 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.794226 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.294214839 +0000 UTC m=+143.055475115 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.843145 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-5v5vz" podStartSLOduration=118.843127163 podStartE2EDuration="1m58.843127163s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:23.841164919 +0000 UTC m=+142.602425195" watchObservedRunningTime="2026-01-29 13:18:23.843127163 +0000 UTC m=+142.604387439"
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.897483 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:23 crc kubenswrapper[4787]: E0129 13:18:23.897865 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.397843917 +0000 UTC m=+143.159104193 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:23 crc kubenswrapper[4787]: I0129 13:18:23.922086 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" podStartSLOduration=118.922061956 podStartE2EDuration="1m58.922061956s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:23.919567725 +0000 UTC m=+142.680828011" watchObservedRunningTime="2026-01-29 13:18:23.922061956 +0000 UTC m=+142.683322232"
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.002414 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.002853 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.502839599 +0000 UTC m=+143.264099865 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.004227 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-s9ssd" podStartSLOduration=119.004199234 podStartE2EDuration="1m59.004199234s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:23.996162602 +0000 UTC m=+142.757422878" watchObservedRunningTime="2026-01-29 13:18:24.004199234 +0000 UTC m=+142.765459500"
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.103421 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.103864 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.603841382 +0000 UTC m=+143.365101658 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.161090 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.161187 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z"
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.170778 4787 patch_prober.go:28] interesting pod/apiserver-76f77b778f-6hc9z container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.170862 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" podUID="a7214c59-d1ca-457b-adb6-12072f3793f1" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused"
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.205669 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.206135 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.706119965 +0000 UTC m=+143.467380241 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.307087 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.307329 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.807288843 +0000 UTC m=+143.568549109 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.307583 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.308055 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.808035447 +0000 UTC m=+143.569295723 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.408783 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.409031 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.908991849 +0000 UTC m=+143.670252125 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.409238 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.409663 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:24.90965548 +0000 UTC m=+143.670915756 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.514765 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.515141 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.015099548 +0000 UTC m=+143.776359824 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.515492 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.516025 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.016006807 +0000 UTC m=+143.777267083 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.616643 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.616836 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.116807853 +0000 UTC m=+143.878068129 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.617181 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.617584 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.117571478 +0000 UTC m=+143.878831754 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.619433 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw" event={"ID":"2f88bb10-54c4-41f6-9345-74d441059753","Type":"ContainerStarted","Data":"5b2500bd934ec0978841acc27c765851686902a7af7576a5cbf69ba0889d2090"}
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.621056 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" event={"ID":"1d559b4e-9262-4ada-8539-62bd08f3dfe4","Type":"ContainerStarted","Data":"7f1d34e2d684b2232fc593c1e277def675fbeeae4ecd478cadfdc8c4b38a804d"}
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.624650 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" event={"ID":"8a190e8a-8a77-4af5-84ae-83976f1f88d1","Type":"ContainerStarted","Data":"a5bc2bf57f2925912ac6ffb7508963173f28a9303b0c91e717294a992b8436ca"}
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.626811 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw" event={"ID":"1f143c5b-cead-45c8-8c25-3b259d28a6b4","Type":"ContainerStarted","Data":"9160e324e44a228252d76f9d5385d52a4ccb1d26681cdac6bd46cb2c94e6d664"}
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.628207 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" event={"ID":"9604be66-c8c5-4ed4-97b9-15648be60d67","Type":"ContainerStarted","Data":"2bc4db13c91dd3e20281feefba73e52481a0c7c7cc1539f4c9c72b29da0de05a"}
Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.634432 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk" event={"ID":"42992ca9-fe81-4299-bfa5-30e38dd9f127","Type":"ContainerStarted","Data":"56421074e111a3f064d63d6323789b37aebc3fd5a69b825324b12a79bb38e4b4"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.636428 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" event={"ID":"62dbfcbd-5249-4d13-9166-e9762e83f252","Type":"ContainerStarted","Data":"85d1dbe4975d5eb148f9f3fd772f0ec5372309989fefec93342cb3ca3f449f95"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.637802 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" event={"ID":"d1e07830-c1c1-4fb9-8df0-019bebe9b06e","Type":"ContainerStarted","Data":"312cb84501c47263d66fee4d17247fc908418b0b1693c396684860ae8ade4370"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.639790 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" event={"ID":"0d35f531-65d6-4292-96f9-c5b3d4e31982","Type":"ContainerStarted","Data":"cc7e4075c2da9de75f8649ca137a9969b6f78b8dd2d8e7b441c2edb8921e0c4c"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.641008 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" event={"ID":"adc1fad4-a3ae-4171-9f63-1df2446d1938","Type":"ContainerStarted","Data":"fa606bd7a9a7d212662ce19fdbd1f91d8745deae227e75c6bdcdd00238c09dd7"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.647173 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" event={"ID":"b366d44d-d212-4aae-8d1f-ecd5cc7083e1","Type":"ContainerStarted","Data":"25c029604ff135db7544f97699ed3f9bf725daa8f2a6bd1f951a9c938836c58b"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.662907 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" event={"ID":"5667f919-7345-4afa-b7bb-a89f26885147","Type":"ContainerStarted","Data":"9236e1ef3052e3f0384b6fe7c333f8551cbc47c1838551248736928901394059"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.667064 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" event={"ID":"8f19a654-ad05-415e-a288-2b3d7733bb00","Type":"ContainerStarted","Data":"ec59fec9fedac144704dd45b0eb3cfe85ae2816aa0e89424b85be4fc12d0c91d"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.672129 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-tvvl4" event={"ID":"9e81570f-d024-4efc-a6cb-5c6c37338623","Type":"ContainerStarted","Data":"f32aa97203a3c677ac74c24d3d23aae934143a47777e09f1f7d2eaffdc1cd289"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.675338 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" event={"ID":"032eaa5a-856b-4af2-82cf-a9dbd690af10","Type":"ContainerStarted","Data":"5d06a76f87d7cb54014f589a08a956d1a9c3a36623efe514bf2514d00359da8f"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.677929 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9" 
event={"ID":"298b71af-3617-4775-bb90-1b62201b557f","Type":"ContainerStarted","Data":"a90863b2712f638195ceb4c39b045649ada9174f9a567c573f5edaf8bcb5253e"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.681838 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-wx8br" event={"ID":"0d5441d0-8b0c-47e5-b255-1e6a174e0460","Type":"ContainerStarted","Data":"0269fe5e65941ac28d5693920ace610f8d8757f5c457058a10ada11c59daf3f6"} Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.682394 4787 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-5wvf8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.682446 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" podUID="7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.682517 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.682583 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.682766 4787 patch_prober.go:28] interesting pod/console-operator-58897d9998-wcqwh container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.682898 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" podUID="7f5a1327-b7cf-453c-88bb-7e890ad5340e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/readyz\": dial tcp 10.217.0.14:8443: connect: connection refused" Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.718822 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.719037 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.218995974 +0000 UTC m=+143.980256250 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.719166 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.719734 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.219720978 +0000 UTC m=+143.980981414 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.820754 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.820989 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.320950798 +0000 UTC m=+144.082211084 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.821521 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.821876 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.321859458 +0000 UTC m=+144.083119734 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.922570 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.922809 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.422765177 +0000 UTC m=+144.184025453 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:24 crc kubenswrapper[4787]: I0129 13:18:24.922876 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:24 crc kubenswrapper[4787]: E0129 13:18:24.923325 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.423300674 +0000 UTC m=+144.184560950 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.024672 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.025127 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.525094403 +0000 UTC m=+144.286354679 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.126513 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.126993 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.626973414 +0000 UTC m=+144.388233690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.228520 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.228661 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.728634058 +0000 UTC m=+144.489894334 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.229339 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.229779 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.729766535 +0000 UTC m=+144.491026811 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.330302 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.330515 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.830485168 +0000 UTC m=+144.591745444 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.330622 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.331115 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.831104718 +0000 UTC m=+144.592365004 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.432163 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.432734 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:25.93270354 +0000 UTC m=+144.693963816 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.534282 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.534843 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.034815749 +0000 UTC m=+144.796076195 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.635766 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.636003 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.135960696 +0000 UTC m=+144.897220972 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.636351 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.636862 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.136851735 +0000 UTC m=+144.898112011 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.687670 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-hwc5r" event={"ID":"c7488ecc-b4f0-44df-a9e2-fe778188b4e0","Type":"ContainerStarted","Data":"fcb8f47ed383a10ef083f6e26dfa0045b11d3bbe91aa7a0757decbbb5c195a04"} Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.689587 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" event={"ID":"9add0ecc-afd4-4dd9-b7e4-c0ae9fe22afc","Type":"ContainerStarted","Data":"2c7783191b79505b6afc456f62a4236c5583141fca973fb949bc98129f209e61"} Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.691936 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" event={"ID":"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72","Type":"ContainerStarted","Data":"18710250e400d1c349a3a89162f8dd6b1bcb2abf82b1f98ca21eeac7b6e1fd03"} Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.693667 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" event={"ID":"ceef637f-9cff-4fce-95d5-7174181e363d","Type":"ContainerStarted","Data":"c17994ae0e03bf9a5bcc584e40d456b280539099db9d99b70cfe2fb8b35aa5dd"} Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.695072 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" event={"ID":"fa35d5d2-7890-4427-ae66-95a23c3c62fd","Type":"ContainerStarted","Data":"08fdc221dbf932c6c18fe3b4977278f4d4bcb9cf73053565f6d68152374f90a5"} Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.696627 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" event={"ID":"c146688a-141a-4102-8b8e-e496685e241f","Type":"ContainerStarted","Data":"301d6f0dd7df12ddac5522264c645349c4e61bcabb6cc6cc770671e1eaf9a3a0"} Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.697572 4787 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-5wvf8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.697622 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" podUID="7f66be6f-c8e3-42f5-ba6c-ef8a9b3de8ea" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.716740 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-k5sqz" podStartSLOduration=120.716718469 podStartE2EDuration="2m0.716718469s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:25.711406036 +0000 UTC m=+144.472666312" watchObservedRunningTime="2026-01-29 13:18:25.716718469 +0000 UTC m=+144.477978745" Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.732964 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-zdpq4" podStartSLOduration=120.732933177 podStartE2EDuration="2m0.732933177s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:25.732175073 +0000 UTC m=+144.493435349" watchObservedRunningTime="2026-01-29 13:18:25.732933177 +0000 UTC m=+144.494193453" Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.738021 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.738365 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.238342734 +0000 UTC m=+144.999603010 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.839948 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.841691 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.341675642 +0000 UTC m=+145.102935918 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.925193 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.941005 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.941182 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.441153795 +0000 UTC m=+145.202414091 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:25 crc kubenswrapper[4787]: I0129 13:18:25.941528 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:25 crc kubenswrapper[4787]: E0129 13:18:25.942012 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.441991982 +0000 UTC m=+145.203252258 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.043039 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.043187 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.543162081 +0000 UTC m=+145.304422357 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.043811 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.044439 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.544402771 +0000 UTC m=+145.305663227 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.145735 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.145949 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.6459203 +0000 UTC m=+145.407180586 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.146649 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.147113 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.647090428 +0000 UTC m=+145.408350704 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.260351 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.260860 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.760834396 +0000 UTC m=+145.522094672 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.362405 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.362852 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.862835181 +0000 UTC m=+145.624095457 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.464692 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.464973 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:26.96494849 +0000 UTC m=+145.726208766 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.566533 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.566980 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.066956726 +0000 UTC m=+145.828217002 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.667362 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.667577 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.167514814 +0000 UTC m=+145.928775090 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.667688 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.668042 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.168034791 +0000 UTC m=+145.929295067 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.704243 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" event={"ID":"65ea8869-3303-4888-bae4-3be58d5097b9","Type":"ContainerStarted","Data":"9745102f467a95823fe5decac7d5774977f7bde20e8c5d28896a834bd59327e2"}
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.706017 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-pgssf" event={"ID":"235b8550-a08d-49cf-afcd-185b307d4db9","Type":"ContainerStarted","Data":"43b6573b1cdf015450ba43177df1c9e8b7deee4d7425f735c00afda364b4e6bc"}
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.708403 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" event={"ID":"43247c40-09cc-41f7-93f8-3d169c219e1b","Type":"ContainerStarted","Data":"f0de129131f2839783b84888a8184c08bf157addc32977a7f2c2df2772863ec4"}
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.708497 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.708542 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.710718 4787 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-57vst container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body=
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.710776 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" podUID="c146688a-141a-4102-8b8e-e496685e241f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.722906 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" podStartSLOduration=121.722886509 podStartE2EDuration="2m1.722886509s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.720690397 +0000 UTC m=+145.481950673" watchObservedRunningTime="2026-01-29 13:18:26.722886509 +0000 UTC m=+145.484146785"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.744437 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-wvsdh" podStartSLOduration=121.744419931 podStartE2EDuration="2m1.744419931s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.742090865 +0000 UTC m=+145.503351141" watchObservedRunningTime="2026-01-29 13:18:26.744419931 +0000 UTC m=+145.505680207"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.768504 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" podStartSLOduration=121.768475135 podStartE2EDuration="2m1.768475135s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.760858256 +0000 UTC m=+145.522118532" watchObservedRunningTime="2026-01-29 13:18:26.768475135 +0000 UTC m=+145.529735411"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.769608 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.769702 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.269687064 +0000 UTC m=+146.030947340 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.770870 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.772761 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.272742594 +0000 UTC m=+146.034002870 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.795699 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-tvvl4" podStartSLOduration=121.795669831 podStartE2EDuration="2m1.795669831s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.794212794 +0000 UTC m=+145.555473070" watchObservedRunningTime="2026-01-29 13:18:26.795669831 +0000 UTC m=+145.556930107"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.812661 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-hwc5r" podStartSLOduration=9.812638514 podStartE2EDuration="9.812638514s" podCreationTimestamp="2026-01-29 13:18:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.810728502 +0000 UTC m=+145.571988778" watchObservedRunningTime="2026-01-29 13:18:26.812638514 +0000 UTC m=+145.573898790"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.836149 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-467gl" podStartSLOduration=121.83612619 podStartE2EDuration="2m1.83612619s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.834727835 +0000 UTC m=+145.595988111" watchObservedRunningTime="2026-01-29 13:18:26.83612619 +0000 UTC m=+145.597386466"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.864200 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" podStartSLOduration=121.864171014 podStartE2EDuration="2m1.864171014s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.863836143 +0000 UTC m=+145.625096409" watchObservedRunningTime="2026-01-29 13:18:26.864171014 +0000 UTC m=+145.625431290"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.872053 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.872302 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.372268788 +0000 UTC m=+146.133529064 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.872479 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.872867 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.372854267 +0000 UTC m=+146.134114543 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.902592 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" podStartSLOduration=121.902563666 podStartE2EDuration="2m1.902563666s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.901257603 +0000 UTC m=+145.662517879" watchObservedRunningTime="2026-01-29 13:18:26.902563666 +0000 UTC m=+145.663823942"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.903786 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" podStartSLOduration=121.903781116 podStartE2EDuration="2m1.903781116s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.887680681 +0000 UTC m=+145.648940957" watchObservedRunningTime="2026-01-29 13:18:26.903781116 +0000 UTC m=+145.665041392"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.915849 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-p4pq9" podStartSLOduration=121.915831518 podStartE2EDuration="2m1.915831518s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.913169862 +0000 UTC m=+145.674430138" watchObservedRunningTime="2026-01-29 13:18:26.915831518 +0000 UTC m=+145.677091794"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.936798 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" podStartSLOduration=121.936770531 podStartE2EDuration="2m1.936770531s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.934637361 +0000 UTC m=+145.695897637" watchObservedRunningTime="2026-01-29 13:18:26.936770531 +0000 UTC m=+145.698030807"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.950172 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-wx8br" podStartSLOduration=8.950148906999999 podStartE2EDuration="8.950148907s" podCreationTimestamp="2026-01-29 13:18:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.950009313 +0000 UTC m=+145.711269589" watchObservedRunningTime="2026-01-29 13:18:26.950148907 +0000 UTC m=+145.711409183"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.963448 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" podStartSLOduration=122.96342929 podStartE2EDuration="2m2.96342929s" podCreationTimestamp="2026-01-29 13:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.962694836 +0000 UTC m=+145.723955112" watchObservedRunningTime="2026-01-29 13:18:26.96342929 +0000 UTC m=+145.724689566"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.973408 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.973564 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.47354243 +0000 UTC m=+146.234802706 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.973668 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:26 crc kubenswrapper[4787]: E0129 13:18:26.974054 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.474042086 +0000 UTC m=+146.235302362 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.976879 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-7bxnt" podStartSLOduration=121.976847497 podStartE2EDuration="2m1.976847497s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.974303835 +0000 UTC m=+145.735564111" watchObservedRunningTime="2026-01-29 13:18:26.976847497 +0000 UTC m=+145.738107773"
Jan 29 13:18:26 crc kubenswrapper[4787]: I0129 13:18:26.999296 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ccc5w" podStartSLOduration=121.999278989 podStartE2EDuration="2m1.999278989s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:26.998897416 +0000 UTC m=+145.760157692" watchObservedRunningTime="2026-01-29 13:18:26.999278989 +0000 UTC m=+145.760539265"
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.037860 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-jdzgw" podStartSLOduration=122.037833036 podStartE2EDuration="2m2.037833036s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:27.036189982 +0000 UTC m=+145.797450278" watchObservedRunningTime="2026-01-29 13:18:27.037833036 +0000 UTC m=+145.799093312"
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.038527 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-sxmvk" podStartSLOduration=124.038521068 podStartE2EDuration="2m4.038521068s" podCreationTimestamp="2026-01-29 13:16:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:27.017705469 +0000 UTC m=+145.778965745" watchObservedRunningTime="2026-01-29 13:18:27.038521068 +0000 UTC m=+145.799781344"
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.053392 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-5bfrw" podStartSLOduration=122.053351191 podStartE2EDuration="2m2.053351191s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:27.049831327 +0000 UTC m=+145.811091603" watchObservedRunningTime="2026-01-29 13:18:27.053351191 +0000 UTC m=+145.814611467"
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.074794 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.075007 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.574964036 +0000 UTC m=+146.336224322 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.075091 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.075866 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.575855155 +0000 UTC m=+146.337115451 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.176610 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.176896 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.676858628 +0000 UTC m=+146.438118914 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.177431 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.177818 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.677808699 +0000 UTC m=+146.439068975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.279052 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.279182 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.779150002 +0000 UTC m=+146.540410278 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.279593 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.280029 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.78001918 +0000 UTC m=+146.541279456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.326279 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-tvvl4"
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.329379 4787 patch_prober.go:28] interesting pod/router-default-5444994796-tvvl4 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.329483 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tvvl4" podUID="9e81570f-d024-4efc-a6cb-5c6c37338623" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.381147 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.381356 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.881309872 +0000 UTC m=+146.642570148 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.381636 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.382059 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.882050987 +0000 UTC m=+146.643311263 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.483405 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.483880 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.983836925 +0000 UTC m=+146.745097221 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.484041 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.484506 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:27.984495906 +0000 UTC m=+146.745756192 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.585749 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.586278 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.086257543 +0000 UTC m=+146.847517809 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.687438 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.687893 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.187868476 +0000 UTC m=+146.949128752 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.727868 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" event={"ID":"adc1fad4-a3ae-4171-9f63-1df2446d1938","Type":"ContainerStarted","Data":"78a0e3919c8e0bb74721dff21a735339d35e7bde5d97abafb58080f1e43a455b"}
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.729666 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" event={"ID":"1d559b4e-9262-4ada-8539-62bd08f3dfe4","Type":"ContainerStarted","Data":"5377d56e96031ea57c7ba871ae967bb017a212778687ef6c021f84340974ab06"}
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.732285 4787 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-57vst container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body=
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.732366 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" podUID="c146688a-141a-4102-8b8e-e496685e241f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused"
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.788849 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.789060 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.289024212 +0000 UTC m=+147.050284488 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.789220 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.789876 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.28985802 +0000 UTC m=+147.051118296 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.891543 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.891656 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.391639348 +0000 UTC m=+147.152899624 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.892208 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.892549 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.392542087 +0000 UTC m=+147.153802353 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.993431 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.993578 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.49354772 +0000 UTC m=+147.254807996 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:27 crc kubenswrapper[4787]: I0129 13:18:27.993693 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:27 crc kubenswrapper[4787]: E0129 13:18:27.994215 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.494205251 +0000 UTC m=+147.255465527 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.095214 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.095374 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.595348468 +0000 UTC m=+147.356608744 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.095586 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.095921 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.595913837 +0000 UTC m=+147.357174113 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.197243 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.199970 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.699902307 +0000 UTC m=+147.461162593 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.299206 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.299607 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.799595546 +0000 UTC m=+147.560855822 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.327308 4787 patch_prober.go:28] interesting pod/router-default-5444994796-tvvl4 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.327370 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tvvl4" podUID="9e81570f-d024-4efc-a6cb-5c6c37338623" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.394375 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.394480 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.400346 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.400945 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:28.90092206 +0000 UTC m=+147.662182356 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.502544 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.503046 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.003029308 +0000 UTC m=+147.764289584 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.604067 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.604244 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.104211077 +0000 UTC m=+147.865471353 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.604348 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.604758 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.104748104 +0000 UTC m=+147.866008370 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.705495 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.705685 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.205643173 +0000 UTC m=+147.966903489 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.705826 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.706225 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.206209541 +0000 UTC m=+147.967469817 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.738272 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" event={"ID":"fa35d5d2-7890-4427-ae66-95a23c3c62fd","Type":"ContainerStarted","Data":"d180a22e6fd64dd0811700816bec9e94e7e3720a872fd0b2aeab757543bb0846"}
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.739984 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" event={"ID":"5b4fbf99-cd24-4516-a01e-e1dbbadc8c72","Type":"ContainerStarted","Data":"35525d311a540e294ea626434792627c25f98ca13bb876fff736f85f9401334e"}
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.807782 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.808113 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.308068492 +0000 UTC m=+148.069328778 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.808326 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.808890 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.308866818 +0000 UTC m=+148.070127124 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.910368 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.910730 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.410680677 +0000 UTC m=+148.171941003 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:28 crc kubenswrapper[4787]: I0129 13:18:28.911427 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:28 crc kubenswrapper[4787]: E0129 13:18:28.911907 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.411885976 +0000 UTC m=+148.173146252 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.013150 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.013415 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.513375055 +0000 UTC m=+148.274635351 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.013525 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.013987 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.513973504 +0000 UTC m=+148.275233790 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.115526 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.115746 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.61569936 +0000 UTC m=+148.376959636 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.115999 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.116382 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.616366522 +0000 UTC m=+148.377626828 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.166311 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.171535 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-6hc9z" Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.217217 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.217428 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.717384105 +0000 UTC m=+148.478644381 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.217634 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.218308 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.718284255 +0000 UTC m=+148.479544531 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.234896 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.255933 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.261535 4787 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-cncjd container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.261608 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" podUID="0d35f531-65d6-4292-96f9-c5b3d4e31982" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.261985 4787 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-cncjd container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.262009 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" podUID="0d35f531-65d6-4292-96f9-c5b3d4e31982" 
containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.277676 4787 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-cncjd container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.277777 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" podUID="0d35f531-65d6-4292-96f9-c5b3d4e31982" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.318805 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.319957 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.819902837 +0000 UTC m=+148.581163153 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.340818 4787 patch_prober.go:28] interesting pod/router-default-5444994796-tvvl4 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.340881 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tvvl4" podUID="9e81570f-d024-4efc-a6cb-5c6c37338623" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.420653 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.421164 4787 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:29.921140917 +0000 UTC m=+148.682401193 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.522246 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.522424 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.022395438 +0000 UTC m=+148.783655714 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.522563 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.522941 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.022934376 +0000 UTC m=+148.784194642 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.623684 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.623871 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.123833755 +0000 UTC m=+148.885094031 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.623911 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.624297 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.12428662 +0000 UTC m=+148.885546896 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.725647 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.725834 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.225799239 +0000 UTC m=+148.987059515 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.725895 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.726251 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.226235283 +0000 UTC m=+148.987495549 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.746539 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" event={"ID":"5667f919-7345-4afa-b7bb-a89f26885147","Type":"ContainerStarted","Data":"a39a96fb853c42268bf3cc682b4a9ccfa6855cacfa44008b0176464855376074"} Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.749304 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-pgssf" event={"ID":"235b8550-a08d-49cf-afcd-185b307d4db9","Type":"ContainerStarted","Data":"16c6a8fd27a0d72d2e23bc6a93dcc739aafe5ef7287c5c585961b3f9b2b5d571"} Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.827088 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.827321 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.327274237 +0000 UTC m=+149.088534523 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.827562 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.828183 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.328172086 +0000 UTC m=+149.089432562 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.929398 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.929650 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.429612753 +0000 UTC m=+149.190873029 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:29 crc kubenswrapper[4787]: I0129 13:18:29.929989 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:29 crc kubenswrapper[4787]: E0129 13:18:29.930376 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.430358227 +0000 UTC m=+149.191618503 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.032418 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.032641 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.53260752 +0000 UTC m=+149.293867796 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.032799 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.033194 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.533185369 +0000 UTC m=+149.294445645 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.134499 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.134721 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.634691218 +0000 UTC m=+149.395951494 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.135004 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.135376 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.63536813 +0000 UTC m=+149.396628406 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.231355 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.236432 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.236691 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.736649932 +0000 UTC m=+149.497910198 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.236870 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.237316 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.737304573 +0000 UTC m=+149.498565029 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.265800 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.265898 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.267311 4787 patch_prober.go:28] interesting pod/console-f9d7485db-5v5vz container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.267370 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5v5vz" podUID="2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.284374 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.284410 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.284490 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.284559 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.327396 4787 patch_prober.go:28] interesting pod/router-default-5444994796-tvvl4 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.327511 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tvvl4" podUID="9e81570f-d024-4efc-a6cb-5c6c37338623" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": 
dial tcp [::1]:1936: connect: connection refused" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.338808 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.339083 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.83904218 +0000 UTC m=+149.600302466 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.339231 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.341356 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.841332425 +0000 UTC m=+149.602592841 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.441080 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.441593 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:30.941567602 +0000 UTC m=+149.702827878 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.542898 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.543370 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.04335019 +0000 UTC m=+149.804610466 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.568788 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-5wvf8" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.644159 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.644420 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.144377713 +0000 UTC m=+149.905637999 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.644614 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.645079 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.145062906 +0000 UTC m=+149.906323182 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.691740 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-wcqwh" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.696720 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.696793 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.698477 4787 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-2qqb8 container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.15:8443/livez\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.698521 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8" podUID="65ea8869-3303-4888-bae4-3be58d5097b9" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.15:8443/livez\": dial tcp 10.217.0.15:8443: connect: connection refused" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.746059 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.746374 4787 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.246317777 +0000 UTC m=+150.007578113 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.746911 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.747322 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.247304359 +0000 UTC m=+150.008564635 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.773944 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-4p5mp" podStartSLOduration=125.773915696 podStartE2EDuration="2m5.773915696s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:30.76974337 +0000 UTC m=+149.531003646" watchObservedRunningTime="2026-01-29 13:18:30.773915696 +0000 UTC m=+149.535175982" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.805250 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-fbnq6" podStartSLOduration=125.805228797 podStartE2EDuration="2m5.805228797s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:30.803291464 +0000 UTC m=+149.564551750" watchObservedRunningTime="2026-01-29 13:18:30.805228797 +0000 UTC m=+149.566489073" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.824977 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qtfv6" podStartSLOduration=125.82495574000001 podStartE2EDuration="2m5.82495574s" podCreationTimestamp="2026-01-29 13:16:25 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:30.822675126 +0000 UTC m=+149.583935402" watchObservedRunningTime="2026-01-29 13:18:30.82495574 +0000 UTC m=+149.586216016" Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.848191 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.849620 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.349598683 +0000 UTC m=+150.110858959 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:30 crc kubenswrapper[4787]: I0129 13:18:30.950639 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:30 crc kubenswrapper[4787]: E0129 13:18:30.951275 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.451247127 +0000 UTC m=+150.212507393 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.051791 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.052061 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-29 13:18:31.552021832 +0000 UTC m=+150.313282118 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.052438 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.052902 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.55287664 +0000 UTC m=+150.314137116 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.153778 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.154306 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.654283526 +0000 UTC m=+150.415543802 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.179675 4787 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-57vst container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.179773 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" podUID="c146688a-141a-4102-8b8e-e496685e241f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.179791 4787 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-57vst container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.179874 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst" podUID="c146688a-141a-4102-8b8e-e496685e241f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.206213 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.207806 4787 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnrc8 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.207850 4787 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnrc8 container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.207896 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" podUID="032eaa5a-856b-4af2-82cf-a9dbd690af10" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.207897 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" podUID="032eaa5a-856b-4af2-82cf-a9dbd690af10" 
containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.208285 4787 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wnrc8 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.208314 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8" podUID="032eaa5a-856b-4af2-82cf-a9dbd690af10" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.257269 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.257721 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.757706487 +0000 UTC m=+150.518966763 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.327209 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-tvvl4" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.331100 4787 patch_prober.go:28] interesting pod/router-default-5444994796-tvvl4 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.331150 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tvvl4" podUID="9e81570f-d024-4efc-a6cb-5c6c37338623" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.359166 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.359395 4787 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.85935692 +0000 UTC m=+150.620617186 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.359735 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.360366 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.860335222 +0000 UTC m=+150.621595638 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.461124 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.461367 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.961335414 +0000 UTC m=+150.722595700 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.461571 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.461912 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:31.961904873 +0000 UTC m=+150.723165149 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.492516 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.493940 4787 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-fxcnw container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.493938 4787 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-fxcnw container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.493993 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" podUID="ceef637f-9cff-4fce-95d5-7174181e363d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.494088 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" podUID="ceef637f-9cff-4fce-95d5-7174181e363d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.494383 4787 patch_prober.go:28] interesting 
pod/marketplace-operator-79b997595-fxcnw container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.494413 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" podUID="ceef637f-9cff-4fce-95d5-7174181e363d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.563548 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.563722 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.063688371 +0000 UTC m=+150.824948647 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.563803 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.565566 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.065547041 +0000 UTC m=+150.826807327 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.665184 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.665424 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.165383546 +0000 UTC m=+150.926643822 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.665620 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.665967 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.165958915 +0000 UTC m=+150.927219191 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.740962 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.741940 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.746074 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.747431 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.759050 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.766209 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.766511 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/907a82cc-8a16-4984-a905-b12cbf43e2f6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"907a82cc-8a16-4984-a905-b12cbf43e2f6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.766582 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/907a82cc-8a16-4984-a905-b12cbf43e2f6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"907a82cc-8a16-4984-a905-b12cbf43e2f6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.766689 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.266674748 +0000 UTC m=+151.027935024 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.807040 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-f5b6g" podStartSLOduration=126.807017663 podStartE2EDuration="2m6.807017663s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:31.806032711 +0000 UTC m=+150.567292997" watchObservedRunningTime="2026-01-29 13:18:31.807017663 +0000 UTC m=+150.568277939" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.848541 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-ld62n" podStartSLOduration=126.848517746 podStartE2EDuration="2m6.848517746s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:31.828489893 +0000 UTC m=+150.589750169" watchObservedRunningTime="2026-01-29 13:18:31.848517746 +0000 UTC m=+150.609778022" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.850195 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-pgssf" podStartSLOduration=13.85018293 podStartE2EDuration="13.85018293s" podCreationTimestamp="2026-01-29 13:18:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:31.84741422 +0000 UTC m=+150.608674506" watchObservedRunningTime="2026-01-29 13:18:31.85018293 +0000 UTC m=+150.611443206" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.868028 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.868202 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.868266 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 
13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.868336 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.868386 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/907a82cc-8a16-4984-a905-b12cbf43e2f6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"907a82cc-8a16-4984-a905-b12cbf43e2f6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.868426 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.868482 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/907a82cc-8a16-4984-a905-b12cbf43e2f6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"907a82cc-8a16-4984-a905-b12cbf43e2f6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.868592 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/907a82cc-8a16-4984-a905-b12cbf43e2f6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"907a82cc-8a16-4984-a905-b12cbf43e2f6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.870073 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.370049998 +0000 UTC m=+151.131310264 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.873523 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.877938 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.878181 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.884250 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.900543 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/907a82cc-8a16-4984-a905-b12cbf43e2f6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"907a82cc-8a16-4984-a905-b12cbf43e2f6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.970051 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:31 crc kubenswrapper[4787]: E0129 13:18:31.970718 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.470694429 +0000 UTC m=+151.231954705 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:31 crc kubenswrapper[4787]: I0129 13:18:31.998436 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.039396 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.062071 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.072708 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.073158 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.573143179 +0000 UTC m=+151.334403455 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.083845 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.173434 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.173848 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.673828051 +0000 UTC m=+151.435088327 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.268421 4787 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-cncjd container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.268494 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" podUID="0d35f531-65d6-4292-96f9-c5b3d4e31982" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.276557 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.284251 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.78422484 +0000 UTC m=+151.545485116 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.286680 4787 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-cncjd container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.286741 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd" podUID="0d35f531-65d6-4292-96f9-c5b3d4e31982" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.398181 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.398798 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:32.898768654 +0000 UTC m=+151.660028930 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.502416 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.502780 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.002767384 +0000 UTC m=+151.764027660 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.558296 4787 patch_prober.go:28] interesting pod/router-default-5444994796-tvvl4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 29 13:18:32 crc kubenswrapper[4787]: [-]has-synced failed: reason withheld Jan 29 13:18:32 crc kubenswrapper[4787]: [+]process-running ok Jan 29 13:18:32 crc kubenswrapper[4787]: healthz check failed Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.558387 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tvvl4" podUID="9e81570f-d024-4efc-a6cb-5c6c37338623" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 29 13:18:32 crc kubenswrapper[4787]: W0129 13:18:32.572015 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-5946813e5cab1095dcdef2081e02d1d5e62ac53244497134bd9c079e151939a7 WatchSource:0}: Error finding container 5946813e5cab1095dcdef2081e02d1d5e62ac53244497134bd9c079e151939a7: Status 404 returned error can't find the container with id 5946813e5cab1095dcdef2081e02d1d5e62ac53244497134bd9c079e151939a7 Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.603992 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.604124 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.104096707 +0000 UTC m=+151.865356983 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.604250 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.604703 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.104693876 +0000 UTC m=+151.865954152 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.645779 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.705602 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.705841 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.205822243 +0000 UTC m=+151.967082509 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.772297 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5946813e5cab1095dcdef2081e02d1d5e62ac53244497134bd9c079e151939a7"} Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.773631 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"907a82cc-8a16-4984-a905-b12cbf43e2f6","Type":"ContainerStarted","Data":"9720222f495a130a37fae385d02b9f790a20e17737f7f0955465fbcae3a952bc"} Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.775483 4787 generic.go:334] "Generic (PLEG): container finished" podID="8a190e8a-8a77-4af5-84ae-83976f1f88d1" containerID="a5bc2bf57f2925912ac6ffb7508963173f28a9303b0c91e717294a992b8436ca" exitCode=0 Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.775579 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" event={"ID":"8a190e8a-8a77-4af5-84ae-83976f1f88d1","Type":"ContainerDied","Data":"a5bc2bf57f2925912ac6ffb7508963173f28a9303b0c91e717294a992b8436ca"} Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.776432 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"dc0e8ff49b1c3fc013fc220bd321257fbcad61c6f8b04b5e3cc247c6213cecaa"} Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.777373 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"bdbb234abaa46ef388233a32b6e7279fe8d22b2b5f9660c2beba6e1ae44a0ec7"} Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.810864 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.811167 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.311144527 +0000 UTC m=+152.072404973 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.912808 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.913000 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.412962916 +0000 UTC m=+152.174223192 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:32 crc kubenswrapper[4787]: I0129 13:18:32.913637 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:32 crc kubenswrapper[4787]: E0129 13:18:32.914010 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.413993999 +0000 UTC m=+152.175254275 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.014869 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.015177 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.515143457 +0000 UTC m=+152.276403733 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.116895 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.117373 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.617357059 +0000 UTC m=+152.378617335 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.217692 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.217850 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.717824684 +0000 UTC m=+152.479084960 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.217938 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.218260 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.718252548 +0000 UTC m=+152.479512824 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.254004 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.318542 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.318750 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.818710803 +0000 UTC m=+152.579971079 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.318813 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.319212 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.819195958 +0000 UTC m=+152.580456234 (durationBeforeRetry 500ms). 
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.332037 4787 patch_prober.go:28] interesting pod/router-default-5444994796-tvvl4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 29 13:18:33 crc kubenswrapper[4787]: [+]has-synced ok
Jan 29 13:18:33 crc kubenswrapper[4787]: [+]process-running ok
Jan 29 13:18:33 crc kubenswrapper[4787]: healthz check failed
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.332120 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-tvvl4" podUID="9e81570f-d024-4efc-a6cb-5c6c37338623" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.381372 4787 csr.go:261] certificate signing request csr-jwnhb is approved, waiting to be issued
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.389301 4787 csr.go:257] certificate signing request csr-jwnhb is issued
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.420097 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.420479 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:33.920439669 +0000 UTC m=+152.681699945 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.522637 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.523020 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.023007622 +0000 UTC m=+152.784267898 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
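
Each failed operation above carries two clocks: a wall-clock retry deadline and an "m=+152.47..." value, which is seconds on the kubelet's monotonic clock since process start. The deadline is always the failure time plus the stated 500ms backoff (modulo the microseconds between computing the deadline and writing the log line). A quick check with values copied from the E0129 13:18:33.217850 entry above:

    from datetime import datetime

    failed = datetime.fromisoformat("2026-01-29 13:18:33.217850")
    retry  = datetime.fromisoformat("2026-01-29 13:18:33.717824")
    print(retry - failed)  # 0:00:00.499974, i.e. the 500ms durationBeforeRetry
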
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.623658 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.623969 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.123933122 +0000 UTC m=+152.885193398 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.624415 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.624955 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.124937065 +0000 UTC m=+152.886197521 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.726272 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.726508 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.226475075 +0000 UTC m=+152.987735351 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.726603 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.727039 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.227020143 +0000 UTC m=+152.988280419 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.785142 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"fbcc3ca1641abb80bb26c4d08cab5796f760a33a294aa4622ef5531501769c3b"}
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.785554 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.787150 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"907a82cc-8a16-4984-a905-b12cbf43e2f6","Type":"ContainerStarted","Data":"228e6d88c1f671e2003381c7aba7a6c6b22dbc8a3b9ac10dcf1a00cf82945286"}
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.789578 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"9defa21009587824fdbf5a8a5fa04a4b4ec68dee43a26906da24b55a41548823"}
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.792261 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"5d797c9d3aef88b5fc048d2275685b359b2431033719769efd53464e196ebe24"}
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.794594 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" event={"ID":"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9","Type":"ContainerStarted","Data":"d33170262d2b399ee5eeee84698cc656811b5ff7ff0670c8dd6ccadb285f91ec"}
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.827950 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.828215 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.32815144 +0000 UTC m=+153.089411716 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.828830 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.829388 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.329362599 +0000 UTC m=+153.090622875 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.930806 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.930947 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.43092126 +0000 UTC m=+153.192181546 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:33 crc kubenswrapper[4787]: I0129 13:18:33.931280 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:33 crc kubenswrapper[4787]: E0129 13:18:33.931706 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.431694395 +0000 UTC m=+153.192954671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.031889 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.03186253 podStartE2EDuration="3.03186253s" podCreationTimestamp="2026-01-29 13:18:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:34.02941009 +0000 UTC m=+152.790670366" watchObservedRunningTime="2026-01-29 13:18:34.03186253 +0000 UTC m=+152.793122806"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.035086 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.035481 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.535429347 +0000 UTC m=+153.296689623 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.136558 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.137060 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.637034699 +0000 UTC m=+153.398294975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.242363 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.242877 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.742837608 +0000 UTC m=+153.504097884 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
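
The pod_startup_latency_tracker entry above reports podStartSLOduration for revision-pruner-9-crc as the watch-observed running time minus the pod creation timestamp; the image-pull window is the zero time here (firstStartedPulling/lastFinishedPulling are 0001-01-01), so nothing is deducted for pulls. The arithmetic checks out against the logged values (timestamps truncated to microseconds, since that is what datetime accepts):

    from datetime import datetime

    created = datetime.fromisoformat("2026-01-29 13:18:31")
    running = datetime.fromisoformat("2026-01-29 13:18:34.031862")  # watchObservedRunningTime
    print((running - created).total_seconds())  # 3.031862, matching podStartSLOduration=3.03186253
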
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.340999 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-tvvl4"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.344559 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.344970 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.844954417 +0000 UTC m=+153.606214693 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.347152 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-tvvl4"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.390544 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-29 13:13:33 +0000 UTC, rotation deadline is 2026-10-27 11:06:38.950631771 +0000 UTC
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.390600 4787 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6501h48m4.560036986s for next certificate rotation
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.445831 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.447294 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:34.947262472 +0000 UTC m=+153.708522748 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
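
The certificate_manager entries above follow the CSR (csr-jwnhb) being approved and issued: the new kubelet-serving certificate expires 2027-01-29 and the manager schedules rotation for 2026-10-27, roughly 74% of the way through the validity window. If I recall client-go's certificate manager correctly (an assumption, not something this log states), the rotation deadline is jittered uniformly into the 70-90% band of the cert's lifetime, and the logged deadline is consistent with that; the notBefore below is inferred from the expiration minus a presumed one-year validity, another assumption:

    from datetime import datetime

    not_before = datetime.fromisoformat("2026-01-29 13:13:33")  # assumed: notAfter minus 1 year
    not_after  = datetime.fromisoformat("2027-01-29 13:13:33")
    deadline   = datetime.fromisoformat("2026-10-27 11:06:38")
    frac = (deadline - not_before) / (not_after - not_before)
    print(round(frac, 3))  # 0.742, inside the presumed 0.7-0.9 jitter window

The logged "Waiting 6501h48m4.56s" is the same deadline expressed as a duration: 6501.8 hours is about 271 days from the log time.
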
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.449611 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.547561 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a190e8a-8a77-4af5-84ae-83976f1f88d1-secret-volume\") pod \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.547678 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a190e8a-8a77-4af5-84ae-83976f1f88d1-config-volume\") pod \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.547907 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qr42\" (UniqueName: \"kubernetes.io/projected/8a190e8a-8a77-4af5-84ae-83976f1f88d1-kube-api-access-5qr42\") pod \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\" (UID: \"8a190e8a-8a77-4af5-84ae-83976f1f88d1\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.548159 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.548703 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.048679178 +0000 UTC m=+153.809939454 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.548939 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a190e8a-8a77-4af5-84ae-83976f1f88d1-config-volume" (OuterVolumeSpecName: "config-volume") pod "8a190e8a-8a77-4af5-84ae-83976f1f88d1" (UID: "8a190e8a-8a77-4af5-84ae-83976f1f88d1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.566728 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a190e8a-8a77-4af5-84ae-83976f1f88d1-kube-api-access-5qr42" (OuterVolumeSpecName: "kube-api-access-5qr42") pod "8a190e8a-8a77-4af5-84ae-83976f1f88d1" (UID: "8a190e8a-8a77-4af5-84ae-83976f1f88d1"). InnerVolumeSpecName "kube-api-access-5qr42". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.567288 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a190e8a-8a77-4af5-84ae-83976f1f88d1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8a190e8a-8a77-4af5-84ae-83976f1f88d1" (UID: "8a190e8a-8a77-4af5-84ae-83976f1f88d1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.649867 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.650070 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.150042572 +0000 UTC m=+153.911302848 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.650242 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.650333 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qr42\" (UniqueName: \"kubernetes.io/projected/8a190e8a-8a77-4af5-84ae-83976f1f88d1-kube-api-access-5qr42\") on node \"crc\" DevicePath \"\""
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.650346 4787 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a190e8a-8a77-4af5-84ae-83976f1f88d1-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.650356 4787 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a190e8a-8a77-4af5-84ae-83976f1f88d1-config-volume\") on node \"crc\" DevicePath \"\""
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.650653 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.150641532 +0000 UTC m=+153.911901808 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
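
Note the contrast in the teardown entries above: the completed collect-profiles pod's configmap, projected, and secret volumes unmount and detach cleanly, because those are in-tree plugins that need no external driver, while the CSI-backed PVC keeps failing until kubevirt.io.hostpath-provisioner registers. A small sketch to list which plugin handled each successful teardown, again assuming the log is saved as kubelet.log with one entry per line as reflowed here:

    import re

    # Extract (plugin, volume) pairs from 'UnmountVolume.TearDown succeeded' entries
    # like the operation_generator.go:803 ones above.
    pat = re.compile(
        r'UnmountVolume\.TearDown succeeded for volume "(?P<vol>[^"]+)"'
        r'.*?PluginName "(?P<plugin>[^"]+)"'
    )
    with open("kubelet.log", encoding="utf-8", errors="replace") as f:
        for line in f:
            for m in pat.finditer(line):
                print(f'{m.group("plugin"):28s} {m.group("vol")}')
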
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.751278 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.751677 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.251659035 +0000 UTC m=+154.012919311 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.808025 4787 generic.go:334] "Generic (PLEG): container finished" podID="907a82cc-8a16-4984-a905-b12cbf43e2f6" containerID="228e6d88c1f671e2003381c7aba7a6c6b22dbc8a3b9ac10dcf1a00cf82945286" exitCode=0
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.808092 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"907a82cc-8a16-4984-a905-b12cbf43e2f6","Type":"ContainerDied","Data":"228e6d88c1f671e2003381c7aba7a6c6b22dbc8a3b9ac10dcf1a00cf82945286"}
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.811635 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k" event={"ID":"8a190e8a-8a77-4af5-84ae-83976f1f88d1","Type":"ContainerDied","Data":"3b60ec040ad6c0b3223c8dafbc936b5d7bd12099187775a11bc8820bb700c33c"}
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.811675 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b60ec040ad6c0b3223c8dafbc936b5d7bd12099187775a11bc8820bb700c33c"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.811929 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.853313 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.853575 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.853886 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a190e8a-8a77-4af5-84ae-83976f1f88d1" containerName="collect-profiles"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.853912 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a190e8a-8a77-4af5-84ae-83976f1f88d1" containerName="collect-profiles"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.854052 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a190e8a-8a77-4af5-84ae-83976f1f88d1" containerName="collect-profiles"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.858630 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.858982 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.358949472 +0000 UTC m=+154.120209748 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.862815 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.863247 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.892605 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.954322 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.954540 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/01edeb2c-055a-4c37-af34-2f7229a3880e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"01edeb2c-055a-4c37-af34-2f7229a3880e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 13:18:34 crc kubenswrapper[4787]: E0129 13:18:34.954623 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.454580139 +0000 UTC m=+154.215840415 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:34 crc kubenswrapper[4787]: I0129 13:18:34.954690 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/01edeb2c-055a-4c37-af34-2f7229a3880e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"01edeb2c-055a-4c37-af34-2f7229a3880e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.056214 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.056274 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/01edeb2c-055a-4c37-af34-2f7229a3880e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"01edeb2c-055a-4c37-af34-2f7229a3880e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.056370 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/01edeb2c-055a-4c37-af34-2f7229a3880e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"01edeb2c-055a-4c37-af34-2f7229a3880e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.056467 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/01edeb2c-055a-4c37-af34-2f7229a3880e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"01edeb2c-055a-4c37-af34-2f7229a3880e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.056820 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.556807451 +0000 UTC m=+154.318067727 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.077426 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/01edeb2c-055a-4c37-af34-2f7229a3880e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"01edeb2c-055a-4c37-af34-2f7229a3880e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.157534 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.158020 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.65799643 +0000 UTC m=+154.419256706 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.194775 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.259186 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.259663 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.759645264 +0000 UTC m=+154.520905550 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.268142 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cncjd"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.360351 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.361747 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.861723281 +0000 UTC m=+154.622983557 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.461891 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.462261 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:35.962245288 +0000 UTC m=+154.723505564 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.562425 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.562910 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.06261847 +0000 UTC m=+154.823878746 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.562969 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.563357 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.063350524 +0000 UTC m=+154.824610800 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.608488 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pww2l"]
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.609602 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.612300 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.624510 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pww2l"]
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.663820 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.663988 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m2tn\" (UniqueName: \"kubernetes.io/projected/95ac4597-f6a6-4a47-8892-d5b556c3363e-kube-api-access-2m2tn\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.664045 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.164013416 +0000 UTC m=+154.925273692 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.664104 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-catalog-content\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.664218 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-utilities\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.664441 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.664846 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.164827432 +0000 UTC m=+154.926087708 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.707960 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 29 13:18:35 crc kubenswrapper[4787]: W0129 13:18:35.710860 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod01edeb2c_055a_4c37_af34_2f7229a3880e.slice/crio-a2072ed5a06e67e2b91ea2bb5b0bd8ea101d777200952e6074c267ab02a73e73 WatchSource:0}: Error finding container a2072ed5a06e67e2b91ea2bb5b0bd8ea101d777200952e6074c267ab02a73e73: Status 404 returned error can't find the container with id a2072ed5a06e67e2b91ea2bb5b0bd8ea101d777200952e6074c267ab02a73e73
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.765550 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.765798 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.265749562 +0000 UTC m=+155.027009838 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.766259 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.766319 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m2tn\" (UniqueName: \"kubernetes.io/projected/95ac4597-f6a6-4a47-8892-d5b556c3363e-kube-api-access-2m2tn\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.766344 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-catalog-content\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.766368 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-utilities\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.766775 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.266764445 +0000 UTC m=+155.028024721 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.766922 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-utilities\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.767358 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-catalog-content\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.799896 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.803955 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z6slf"]
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.804804 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m2tn\" (UniqueName: \"kubernetes.io/projected/95ac4597-f6a6-4a47-8892-d5b556c3363e-kube-api-access-2m2tn\") pod \"community-operators-pww2l\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") " pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.814552 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2qqb8"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.814705 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z6slf"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.819904 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.821770 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z6slf"]
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.837927 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"01edeb2c-055a-4c37-af34-2f7229a3880e","Type":"ContainerStarted","Data":"a2072ed5a06e67e2b91ea2bb5b0bd8ea101d777200952e6074c267ab02a73e73"}
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.867227 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.867711 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.367682955 +0000 UTC m=+155.128943231 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.867758 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-utilities\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.867820 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.867849 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnl7g\" (UniqueName: \"kubernetes.io/projected/60be26cc-9957-4401-85dd-7572bb78975f-kube-api-access-fnl7g\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf"
Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.868127 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName:
\"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-catalog-content\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.869381 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.369342489 +0000 UTC m=+155.130602765 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.935915 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pww2l" Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.973486 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.974610 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.474570909 +0000 UTC m=+155.235831185 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.977348 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.977390 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnl7g\" (UniqueName: \"kubernetes.io/projected/60be26cc-9957-4401-85dd-7572bb78975f-kube-api-access-fnl7g\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.977505 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-catalog-content\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.977586 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-utilities\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.978099 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-utilities\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:18:35 crc kubenswrapper[4787]: E0129 13:18:35.978423 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.478409704 +0000 UTC m=+155.239669980 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:35 crc kubenswrapper[4787]: I0129 13:18:35.978812 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-catalog-content\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.010940 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnl7g\" (UniqueName: \"kubernetes.io/projected/60be26cc-9957-4401-85dd-7572bb78975f-kube-api-access-fnl7g\") pod \"certified-operators-z6slf\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") " pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.022980 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wxvjz"] Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.025237 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.030849 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wxvjz"] Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.078596 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.079242 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.578778886 +0000 UTC m=+155.340039162 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.079327 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-catalog-content\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.079370 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.079396 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-utilities\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.079693 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgdjb\" (UniqueName: \"kubernetes.io/projected/2236dfad-b7be-4375-9661-287dbeeec969-kube-api-access-sgdjb\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.079733 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.579717837 +0000 UTC m=+155.340978113 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.140084 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.180656 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.181042 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgdjb\" (UniqueName: \"kubernetes.io/projected/2236dfad-b7be-4375-9661-287dbeeec969-kube-api-access-sgdjb\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.181123 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-catalog-content\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.181158 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-utilities\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.181629 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-utilities\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.181714 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.681696981 +0000 UTC m=+155.442957257 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.183003 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-catalog-content\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.209279 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.211029 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xp748"] Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.211356 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="907a82cc-8a16-4984-a905-b12cbf43e2f6" containerName="pruner" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.211375 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="907a82cc-8a16-4984-a905-b12cbf43e2f6" containerName="pruner" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.215658 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="907a82cc-8a16-4984-a905-b12cbf43e2f6" containerName="pruner" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.216683 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.217529 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgdjb\" (UniqueName: \"kubernetes.io/projected/2236dfad-b7be-4375-9661-287dbeeec969-kube-api-access-sgdjb\") pod \"community-operators-wxvjz\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.255237 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xp748"] Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.315934 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.316505 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-pgssf" Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.317166 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.817121636 +0000 UTC m=+155.578381902 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.376997 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.423008 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/907a82cc-8a16-4984-a905-b12cbf43e2f6-kube-api-access\") pod \"907a82cc-8a16-4984-a905-b12cbf43e2f6\" (UID: \"907a82cc-8a16-4984-a905-b12cbf43e2f6\") " Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.428540 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.428655 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/907a82cc-8a16-4984-a905-b12cbf43e2f6-kubelet-dir\") pod \"907a82cc-8a16-4984-a905-b12cbf43e2f6\" (UID: \"907a82cc-8a16-4984-a905-b12cbf43e2f6\") " Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.428850 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.928810307 +0000 UTC m=+155.690070583 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.428907 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-utilities\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.428854 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/907a82cc-8a16-4984-a905-b12cbf43e2f6-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "907a82cc-8a16-4984-a905-b12cbf43e2f6" (UID: "907a82cc-8a16-4984-a905-b12cbf43e2f6"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.429576 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-catalog-content\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.429693 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.429712 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsqb5\" (UniqueName: \"kubernetes.io/projected/52138722-381d-473d-85ab-f4961a18819c-kube-api-access-hsqb5\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.430806 4787 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/907a82cc-8a16-4984-a905-b12cbf43e2f6-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.431091 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:36.931083571 +0000 UTC m=+155.692343847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.448971 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/907a82cc-8a16-4984-a905-b12cbf43e2f6-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "907a82cc-8a16-4984-a905-b12cbf43e2f6" (UID: "907a82cc-8a16-4984-a905-b12cbf43e2f6"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.513918 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pww2l"] Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.535496 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.535909 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.035877887 +0000 UTC m=+155.797138163 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.536095 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-catalog-content\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.536140 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.536162 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsqb5\" (UniqueName: \"kubernetes.io/projected/52138722-381d-473d-85ab-f4961a18819c-kube-api-access-hsqb5\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.536205 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-utilities\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.536263 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/907a82cc-8a16-4984-a905-b12cbf43e2f6-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.536668 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-utilities\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.536924 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-catalog-content\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.537015 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.037001484 +0000 UTC m=+155.798261760 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.575760 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsqb5\" (UniqueName: \"kubernetes.io/projected/52138722-381d-473d-85ab-f4961a18819c-kube-api-access-hsqb5\") pod \"certified-operators-xp748\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.615829 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.640725 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.641096 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.141079957 +0000 UTC m=+155.902340233 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.742841 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.743687 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.243670581 +0000 UTC m=+156.004930857 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.815676 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wxvjz"] Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.844802 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.845283 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.345259883 +0000 UTC m=+156.106520159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.863558 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.863643 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"907a82cc-8a16-4984-a905-b12cbf43e2f6","Type":"ContainerDied","Data":"9720222f495a130a37fae385d02b9f790a20e17737f7f0955465fbcae3a952bc"} Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.863696 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9720222f495a130a37fae385d02b9f790a20e17737f7f0955465fbcae3a952bc" Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.864937 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pww2l" event={"ID":"95ac4597-f6a6-4a47-8892-d5b556c3363e","Type":"ContainerStarted","Data":"81111c33c9d9d93651fafb4e656625efef22a664f9bf523ebfba9d78f968a223"} Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.934282 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xp748"] Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.934388 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z6slf"] Jan 29 13:18:36 crc kubenswrapper[4787]: I0129 13:18:36.946265 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:36 crc kubenswrapper[4787]: E0129 13:18:36.947729 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.447705782 +0000 UTC m=+156.208966058 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:36 crc kubenswrapper[4787]: W0129 13:18:36.970635 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52138722_381d_473d_85ab_f4961a18819c.slice/crio-e1f67774061c3891dd5207465f0d66439d8bb120b4147a2cebf554a1bcaaf2fc WatchSource:0}: Error finding container e1f67774061c3891dd5207465f0d66439d8bb120b4147a2cebf554a1bcaaf2fc: Status 404 returned error can't find the container with id e1f67774061c3891dd5207465f0d66439d8bb120b4147a2cebf554a1bcaaf2fc Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.048377 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.048576 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.54854702 +0000 UTC m=+156.309807296 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.048623 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.049008 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.548991214 +0000 UTC m=+156.310251490 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.149830 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.150301 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.650284556 +0000 UTC m=+156.411544832 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.251581 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.252134 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.752114226 +0000 UTC m=+156.513374502 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.354294 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.354789 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.854756902 +0000 UTC m=+156.616017178 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.456126 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.456526 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:37.956510179 +0000 UTC m=+156.717770455 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.557280 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.557616 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.057590914 +0000 UTC m=+156.818851210 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.659167 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.659599 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.159582749 +0000 UTC m=+156.920843035 (durationBeforeRetry 500ms). 
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.760199 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.760687 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.260650363 +0000 UTC m=+157.021910649 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.806356 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n44gv"]
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.808556 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n44gv"
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.812911 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.823610 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n44gv"]
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.884638 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.884737 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-catalog-content\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv"
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.884803 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsprw\" (UniqueName: \"kubernetes.io/projected/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-kube-api-access-xsprw\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv"
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.884929 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-utilities\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv"
Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.885077 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.385050799 +0000 UTC m=+157.146311065 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.890396 4787 generic.go:334] "Generic (PLEG): container finished" podID="52138722-381d-473d-85ab-f4961a18819c" containerID="54822d283b11626e3eaccceb5d490f6a52e936d1f32faaea0fed45b527abcfef" exitCode=0
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.890541 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp748" event={"ID":"52138722-381d-473d-85ab-f4961a18819c","Type":"ContainerDied","Data":"54822d283b11626e3eaccceb5d490f6a52e936d1f32faaea0fed45b527abcfef"}
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.890571 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp748" event={"ID":"52138722-381d-473d-85ab-f4961a18819c","Type":"ContainerStarted","Data":"e1f67774061c3891dd5207465f0d66439d8bb120b4147a2cebf554a1bcaaf2fc"}
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.892281 4787 generic.go:334] "Generic (PLEG): container finished" podID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerID="992db4ba1869a0e959a871fd9e722f0037dacb6fcb8a59531ab749b8dc522153" exitCode=0
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.892390 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pww2l" event={"ID":"95ac4597-f6a6-4a47-8892-d5b556c3363e","Type":"ContainerDied","Data":"992db4ba1869a0e959a871fd9e722f0037dacb6fcb8a59531ab749b8dc522153"}
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.894836 4787 generic.go:334] "Generic (PLEG): container finished" podID="60be26cc-9957-4401-85dd-7572bb78975f" containerID="5cfeebd696c1b311d561af7fe0c7b617b8ef52aa403aa1e6bf8d0041511fb560" exitCode=0
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.894937 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6slf" event={"ID":"60be26cc-9957-4401-85dd-7572bb78975f","Type":"ContainerDied","Data":"5cfeebd696c1b311d561af7fe0c7b617b8ef52aa403aa1e6bf8d0041511fb560"}
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.894972 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6slf" event={"ID":"60be26cc-9957-4401-85dd-7572bb78975f","Type":"ContainerStarted","Data":"007a2f606eab62df25077c8960ce65ec89fb1165ad16e760eee8c6d0fe80346d"}
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.896355 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.897914 4787 generic.go:334] "Generic (PLEG): container finished" podID="01edeb2c-055a-4c37-af34-2f7229a3880e" containerID="c3560c6351a839d203a49f5e536089bbd71629835c72e0a7e027cba8de7c8712" exitCode=0
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.897974 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"01edeb2c-055a-4c37-af34-2f7229a3880e","Type":"ContainerDied","Data":"c3560c6351a839d203a49f5e536089bbd71629835c72e0a7e027cba8de7c8712"}
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.906269 4787 generic.go:334] "Generic (PLEG): container finished" podID="2236dfad-b7be-4375-9661-287dbeeec969" containerID="b54618db470368026ad0b49856b86067e7bc6dcec9f86f923837605b27ddd844" exitCode=0
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.906358 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxvjz" event={"ID":"2236dfad-b7be-4375-9661-287dbeeec969","Type":"ContainerDied","Data":"b54618db470368026ad0b49856b86067e7bc6dcec9f86f923837605b27ddd844"}
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.906396 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxvjz" event={"ID":"2236dfad-b7be-4375-9661-287dbeeec969","Type":"ContainerStarted","Data":"38606333fdc4c6888616addf0cdb0f0024c8e95c3289b7c26ba4387ad6735f6e"}
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.987239 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.987686 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-utilities\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv"
Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.987777 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.487752047 +0000 UTC m=+157.249012323 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.987835 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.987872 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-catalog-content\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv"
Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.987920 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsprw\" (UniqueName: \"kubernetes.io/projected/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-kube-api-access-xsprw\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv"
Jan 29 13:18:37 crc kubenswrapper[4787]: E0129 13:18:37.988959 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.488951406 +0000 UTC m=+157.250211682 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.989602 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-utilities\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:18:37 crc kubenswrapper[4787]: I0129 13:18:37.989892 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-catalog-content\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.011287 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsprw\" (UniqueName: \"kubernetes.io/projected/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-kube-api-access-xsprw\") pod \"redhat-marketplace-n44gv\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") " pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.089346 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.089532 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.589501763 +0000 UTC m=+157.350762049 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.089927 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.090412 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.590401243 +0000 UTC m=+157.351661519 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.191629 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.191892 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.69185073 +0000 UTC m=+157.453111016 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.192033 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.192522 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.692503461 +0000 UTC m=+157.453763727 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.210045 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j7bsl"] Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.211183 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.230257 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7bsl"] Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.292539 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.292692 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-utilities\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.292775 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlwvh\" (UniqueName: \"kubernetes.io/projected/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-kube-api-access-vlwvh\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.292818 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-catalog-content\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.292963 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.792937205 +0000 UTC m=+157.554197491 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.305584 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.394741 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlwvh\" (UniqueName: \"kubernetes.io/projected/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-kube-api-access-vlwvh\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.395127 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-catalog-content\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.395184 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.395216 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-utilities\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.395711 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-catalog-content\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.395783 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.895758467 +0000 UTC m=+157.657018743 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.395808 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-utilities\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.420362 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlwvh\" (UniqueName: \"kubernetes.io/projected/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-kube-api-access-vlwvh\") pod \"redhat-marketplace-j7bsl\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.497330 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.497782 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:38.99772986 +0000 UTC m=+157.758990146 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.519072 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n44gv"] Jan 29 13:18:38 crc kubenswrapper[4787]: W0129 13:18:38.532352 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f791725_08e7_42f5_b0ee_cd67dfc1fc1b.slice/crio-b708c0db648ee72703c34a208925026c96070a45c038227be94b2a2357f72052 WatchSource:0}: Error finding container b708c0db648ee72703c34a208925026c96070a45c038227be94b2a2357f72052: Status 404 returned error can't find the container with id b708c0db648ee72703c34a208925026c96070a45c038227be94b2a2357f72052 Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.536943 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.598928 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.599373 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.099353773 +0000 UTC m=+157.860614049 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.700840 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.701221 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.201205173 +0000 UTC m=+157.962465449 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.801825 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.802195 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.302182955 +0000 UTC m=+158.063443231 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.803750 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-87tmc"] Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.808709 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.814316 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.817950 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-87tmc"] Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.902741 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.902965 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-utilities\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.903043 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kbxs\" (UniqueName: \"kubernetes.io/projected/d376a31e-47be-4275-a440-5a961fb875d3-kube-api-access-5kbxs\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.903088 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-catalog-content\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:38 crc kubenswrapper[4787]: E0129 13:18:38.903236 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.403215698 +0000 UTC m=+158.164475974 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:38 crc kubenswrapper[4787]: I0129 13:18:38.915069 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n44gv" event={"ID":"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b","Type":"ContainerStarted","Data":"b708c0db648ee72703c34a208925026c96070a45c038227be94b2a2357f72052"} Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.004851 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-utilities\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.004903 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kbxs\" (UniqueName: \"kubernetes.io/projected/d376a31e-47be-4275-a440-5a961fb875d3-kube-api-access-5kbxs\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.004928 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.004979 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-catalog-content\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.005384 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.505366468 +0000 UTC m=+158.266626734 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.005570 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-utilities\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.005662 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-catalog-content\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.050200 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kbxs\" (UniqueName: \"kubernetes.io/projected/d376a31e-47be-4275-a440-5a961fb875d3-kube-api-access-5kbxs\") pod \"redhat-operators-87tmc\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") " pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.090292 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7bsl"] Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.105429 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.105812 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.605791972 +0000 UTC m=+158.367052248 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.130360 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.202563 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fzxtm"] Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.203856 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.207092 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.207584 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.70756578 +0000 UTC m=+158.468826066 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.218356 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fzxtm"] Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.286245 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.308178 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/01edeb2c-055a-4c37-af34-2f7229a3880e-kube-api-access\") pod \"01edeb2c-055a-4c37-af34-2f7229a3880e\" (UID: \"01edeb2c-055a-4c37-af34-2f7229a3880e\") " Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.308237 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/01edeb2c-055a-4c37-af34-2f7229a3880e-kubelet-dir\") pod \"01edeb2c-055a-4c37-af34-2f7229a3880e\" (UID: \"01edeb2c-055a-4c37-af34-2f7229a3880e\") " Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.308338 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.308500 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-catalog-content\") pod \"redhat-operators-fzxtm\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.308532 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqvsf\" (UniqueName: \"kubernetes.io/projected/df99e00f-9a78-454e-9f8e-5da684f374a1-kube-api-access-lqvsf\") pod \"redhat-operators-fzxtm\" (UID: 
\"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.308577 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-utilities\") pod \"redhat-operators-fzxtm\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.310383 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.810361251 +0000 UTC m=+158.571621527 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.310732 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/01edeb2c-055a-4c37-af34-2f7229a3880e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "01edeb2c-055a-4c37-af34-2f7229a3880e" (UID: "01edeb2c-055a-4c37-af34-2f7229a3880e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.330638 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01edeb2c-055a-4c37-af34-2f7229a3880e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "01edeb2c-055a-4c37-af34-2f7229a3880e" (UID: "01edeb2c-055a-4c37-af34-2f7229a3880e"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.409830 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqvsf\" (UniqueName: \"kubernetes.io/projected/df99e00f-9a78-454e-9f8e-5da684f374a1-kube-api-access-lqvsf\") pod \"redhat-operators-fzxtm\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.409903 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-utilities\") pod \"redhat-operators-fzxtm\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.409955 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.412680 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-utilities\") pod \"redhat-operators-fzxtm\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.413039 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:39.913023297 +0000 UTC m=+158.674283573 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.414191 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-catalog-content\") pod \"redhat-operators-fzxtm\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.421019 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-catalog-content\") pod \"redhat-operators-fzxtm\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.421205 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/01edeb2c-055a-4c37-af34-2f7229a3880e-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.421227 4787 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/01edeb2c-055a-4c37-af34-2f7229a3880e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.437062 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqvsf\" (UniqueName: \"kubernetes.io/projected/df99e00f-9a78-454e-9f8e-5da684f374a1-kube-api-access-lqvsf\") pod \"redhat-operators-fzxtm\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.478178 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-87tmc"] Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.521498 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.521664 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.021630888 +0000 UTC m=+158.782891164 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.521979 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.522384 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.022374672 +0000 UTC m=+158.783634948 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.577061 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.622820 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.623027 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.122990732 +0000 UTC m=+158.884251008 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.623977 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.624389 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.124366987 +0000 UTC m=+158.885627263 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.725584 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.725839 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.225792623 +0000 UTC m=+158.987052899 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.726369 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.726841 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.226830837 +0000 UTC m=+158.988091113 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.827246 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.827446 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.327410896 +0000 UTC m=+159.088671172 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.827536 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.827939 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.327931283 +0000 UTC m=+159.089191559 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.831070 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fzxtm"] Jan 29 13:18:39 crc kubenswrapper[4787]: W0129 13:18:39.834806 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf99e00f_9a78_454e_9f8e_5da684f374a1.slice/crio-9017cfe0fb14b91c0fd5cb9a243bacb6145cca29d5a12a0e5e54678d1e13a523 WatchSource:0}: Error finding container 9017cfe0fb14b91c0fd5cb9a243bacb6145cca29d5a12a0e5e54678d1e13a523: Status 404 returned error can't find the container with id 9017cfe0fb14b91c0fd5cb9a243bacb6145cca29d5a12a0e5e54678d1e13a523 Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.928225 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.928643 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.928910 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.428885324 +0000 UTC m=+159.190145600 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.928538 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"01edeb2c-055a-4c37-af34-2f7229a3880e","Type":"ContainerDied","Data":"a2072ed5a06e67e2b91ea2bb5b0bd8ea101d777200952e6074c267ab02a73e73"}
Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.928946 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2072ed5a06e67e2b91ea2bb5b0bd8ea101d777200952e6074c267ab02a73e73"
Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.929727 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:39 crc kubenswrapper[4787]: E0129 13:18:39.930348 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.430330091 +0000 UTC m=+159.191590367 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.931581 4787 generic.go:334] "Generic (PLEG): container finished" podID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerID="9948c0737e301ff7f0cf74c7324a936e7ac5be3e503b103d06bf9e60af0a059c" exitCode=0
Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.931782 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n44gv" event={"ID":"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b","Type":"ContainerDied","Data":"9948c0737e301ff7f0cf74c7324a936e7ac5be3e503b103d06bf9e60af0a059c"}
Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.935814 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzxtm" event={"ID":"df99e00f-9a78-454e-9f8e-5da684f374a1","Type":"ContainerStarted","Data":"9017cfe0fb14b91c0fd5cb9a243bacb6145cca29d5a12a0e5e54678d1e13a523"}
Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.941717 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7bsl" event={"ID":"5c3abece-22a6-44f4-9b3d-77ad9eed03b7","Type":"ContainerStarted","Data":"aaac0045a3b347295814ced7f38172234f51a9e88194a2aed65a71aa7c72abd3"}
Jan 29 13:18:39 crc kubenswrapper[4787]: I0129 13:18:39.943419 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87tmc" event={"ID":"d376a31e-47be-4275-a440-5a961fb875d3","Type":"ContainerStarted","Data":"a7491a4c10a12ece046afd1c3bc58217a1393d9558ca9fa89a23e128f664379b"}
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.031686 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.031896 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.531863561 +0000 UTC m=+159.293123837 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.032309 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.032814 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.532805272 +0000 UTC m=+159.294065548 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.134081 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.134359 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.634317451 +0000 UTC m=+159.395577727 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.134466 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.134856 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.634848698 +0000 UTC m=+159.396108974 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.236091 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.236292 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.736264834 +0000 UTC m=+159.497525100 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.237249 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.237873 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.737842146 +0000 UTC m=+159.499102412 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.284732 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.284793 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.284816 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.284864 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.338588 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.338841 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.838779596 +0000 UTC m=+159.600039882 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.339149 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.339540 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.839531881 +0000 UTC m=+159.600792157 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.437876 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.440359 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.440562 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.940535583 +0000 UTC m=+159.701795869 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.440726 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.441127 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:40.941113412 +0000 UTC m=+159.702373688 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.442250 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-5v5vz"
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.542959 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.543113 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.043087716 +0000 UTC m=+159.804347992 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.543364 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.543836 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.04382553 +0000 UTC m=+159.805085806 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.644794 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.645272 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.145245626 +0000 UTC m=+159.906505902 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.759007 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.759618 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.259606414 +0000 UTC m=+160.020866690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.860865 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.861393 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.361365052 +0000 UTC m=+160.122625338 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.861548 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.862008 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.361998282 +0000 UTC m=+160.123258568 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.954954 4787 generic.go:334] "Generic (PLEG): container finished" podID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerID="cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c" exitCode=0
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.955087 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzxtm" event={"ID":"df99e00f-9a78-454e-9f8e-5da684f374a1","Type":"ContainerDied","Data":"cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c"}
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.958719 4787 generic.go:334] "Generic (PLEG): container finished" podID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerID="45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72" exitCode=0
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.958815 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7bsl" event={"ID":"5c3abece-22a6-44f4-9b3d-77ad9eed03b7","Type":"ContainerDied","Data":"45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72"}
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.962747 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.962992 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.462944183 +0000 UTC m=+160.224204459 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.963116 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:40 crc kubenswrapper[4787]: E0129 13:18:40.963656 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.463644446 +0000 UTC m=+160.224904722 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.964564 4787 generic.go:334] "Generic (PLEG): container finished" podID="d376a31e-47be-4275-a440-5a961fb875d3" containerID="67da766086ebad503cef99a95012903e7b8add2d9a3444ac4396fc56b2a0e06a" exitCode=0
Jan 29 13:18:40 crc kubenswrapper[4787]: I0129 13:18:40.964655 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87tmc" event={"ID":"d376a31e-47be-4275-a440-5a961fb875d3","Type":"ContainerDied","Data":"67da766086ebad503cef99a95012903e7b8add2d9a3444ac4396fc56b2a0e06a"}
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.064886 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.065094 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.565055882 +0000 UTC m=+160.326316168 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.065555 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.066588 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.56654408 +0000 UTC m=+160.327804356 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.167055 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.167409 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.667371317 +0000 UTC m=+160.428631593 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.167879 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.168358 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.668334798 +0000 UTC m=+160.429595074 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.209617 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wnrc8"
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.263741 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-57vst"
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.270599 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.270821 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.770797688 +0000 UTC m=+160.532057964 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.270893 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.272300 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.772292597 +0000 UTC m=+160.533552873 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.372830 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.373560 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.873534107 +0000 UTC m=+160.634794383 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.475542 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.476176 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:41.976149312 +0000 UTC m=+160.737409758 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.497742 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw"
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.593221 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.593702 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.093688284 +0000 UTC m=+160.854948560 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.695013 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.697018 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.197001492 +0000 UTC m=+160.958261768 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.796710 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.797202 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.297160417 +0000 UTC m=+161.058420693 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.797376 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.797915 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.297894331 +0000 UTC m=+161.059154607 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.900105 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.900379 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.40033479 +0000 UTC m=+161.161595066 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.900830 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:41 crc kubenswrapper[4787]: E0129 13:18:41.901298 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.401288851 +0000 UTC m=+161.162549137 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:41 crc kubenswrapper[4787]: I0129 13:18:41.978402 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" event={"ID":"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9","Type":"ContainerStarted","Data":"276f1c72e8ab9cc3227c65913905e954a1450f1b42483fc9c45b657cf1fdbea4"}
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.003960 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.004400 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.504356101 +0000 UTC m=+161.265616377 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.105589 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.106140 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.606115048 +0000 UTC m=+161.367375324 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.206986 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.207208 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.707172662 +0000 UTC m=+161.468432938 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.207242 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.207762 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.707752661 +0000 UTC m=+161.469012937 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.308875 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.309360 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.809303761 +0000 UTC m=+161.570564037 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.309447 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.310134 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.810113868 +0000 UTC m=+161.571374144 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.410929 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.411148 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.91111373 +0000 UTC m=+161.672374006 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.411236 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.411782 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:42.911764992 +0000 UTC m=+161.673025268 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.512553 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.512731 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.012707092 +0000 UTC m=+161.773967368 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.513057 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.513544 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.013525269 +0000 UTC m=+161.774785545 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.613952 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.614217 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.114195281 +0000 UTC m=+161.875455557 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.655935 4787 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.715564 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.716024 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.21600935 +0000 UTC m=+161.977269626 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.818232 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.818506 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.3184789 +0000 UTC m=+162.079739176 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.818726 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf"
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.819105 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.31909015 +0000 UTC m=+162.080350416 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.919718 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.920938 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.42091202 +0000 UTC m=+162.182172296 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:42 crc kubenswrapper[4787]: I0129 13:18:42.921004 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:42 crc kubenswrapper[4787]: E0129 13:18:42.921575 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.42153745 +0000 UTC m=+162.182797726 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.001554 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" event={"ID":"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9","Type":"ContainerStarted","Data":"d3c39e3e9b0750035b701f8338f810df5f30da3761800dd39633db5b62004af7"} Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.022600 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:43 crc kubenswrapper[4787]: E0129 13:18:43.022767 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.522727499 +0000 UTC m=+162.283987775 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.022994 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:43 crc kubenswrapper[4787]: E0129 13:18:43.023386 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.52337763 +0000 UTC m=+162.284637906 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.123890 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:43 crc kubenswrapper[4787]: E0129 13:18:43.124191 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.624147915 +0000 UTC m=+162.385408191 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.124308 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:43 crc kubenswrapper[4787]: E0129 13:18:43.124858 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-29 13:18:43.624850388 +0000 UTC m=+162.386110664 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-fq9qf" (UID: "945c6d0d-6e91-4805-937d-401bd0742688") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.175786 4787 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-29T13:18:42.655962012Z","Handler":null,"Name":""} Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.181017 4787 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.181071 4787 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.225521 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.229669 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.326746 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.331758 4787 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.331806 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.795265 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-fq9qf\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:43 crc kubenswrapper[4787]: I0129 13:18:43.820169 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:44 crc kubenswrapper[4787]: I0129 13:18:44.025145 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 29 13:18:44 crc kubenswrapper[4787]: I0129 13:18:44.035014 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" event={"ID":"7d1c9ddf-15c9-48b5-8a65-1ce9805585f9","Type":"ContainerStarted","Data":"324c06ce453d29ee7a128e8e11612a173d2bb540d7e3b4546214a0ee83cc5cbc"} Jan 29 13:18:44 crc kubenswrapper[4787]: I0129 13:18:44.065998 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-jxhlz" podStartSLOduration=27.065976197 podStartE2EDuration="27.065976197s" podCreationTimestamp="2026-01-29 13:18:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:44.065525362 +0000 UTC m=+162.826785658" watchObservedRunningTime="2026-01-29 13:18:44.065976197 +0000 UTC m=+162.827236463" Jan 29 13:18:44 crc kubenswrapper[4787]: I0129 13:18:44.277029 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fq9qf"] Jan 29 13:18:44 crc kubenswrapper[4787]: W0129 13:18:44.292267 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod945c6d0d_6e91_4805_937d_401bd0742688.slice/crio-e65b4d6608ecfac496fa424c991b0e519098811b5bedb9c14711f5f019fbec17 WatchSource:0}: Error finding container e65b4d6608ecfac496fa424c991b0e519098811b5bedb9c14711f5f019fbec17: Status 404 returned error can't find the container with id e65b4d6608ecfac496fa424c991b0e519098811b5bedb9c14711f5f019fbec17 Jan 29 13:18:45 crc kubenswrapper[4787]: I0129 13:18:45.043589 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" event={"ID":"945c6d0d-6e91-4805-937d-401bd0742688","Type":"ContainerStarted","Data":"b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575"} Jan 29 13:18:45 crc kubenswrapper[4787]: I0129 13:18:45.044141 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" event={"ID":"945c6d0d-6e91-4805-937d-401bd0742688","Type":"ContainerStarted","Data":"e65b4d6608ecfac496fa424c991b0e519098811b5bedb9c14711f5f019fbec17"} Jan 29 13:18:45 crc kubenswrapper[4787]: I0129 13:18:45.066729 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" podStartSLOduration=140.066686489 podStartE2EDuration="2m20.066686489s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:18:45.064159397 +0000 UTC m=+163.825419673" watchObservedRunningTime="2026-01-29 13:18:45.066686489 +0000 UTC m=+163.827946765" Jan 29 13:18:46 crc kubenswrapper[4787]: I0129 13:18:46.063203 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:18:48 crc kubenswrapper[4787]: I0129 13:18:48.233075 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-controller-manager/controller-manager-879f6c89f-4whcq"] Jan 29 13:18:48 crc kubenswrapper[4787]: I0129 13:18:48.233399 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" containerID="cri-o://c391d1152110658bb71ef2bf4394e692ea92e8e1f01bb4e5c4de7ce883b3e7aa" gracePeriod=30 Jan 29 13:18:48 crc kubenswrapper[4787]: I0129 13:18:48.246921 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"] Jan 29 13:18:48 crc kubenswrapper[4787]: I0129 13:18:48.247216 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerName="route-controller-manager" containerID="cri-o://f8ab09b7ebf64e29a28dca80eab27e110baae6645e187333f0882ba271d307fa" gracePeriod=30 Jan 29 13:18:48 crc kubenswrapper[4787]: I0129 13:18:48.338736 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:18:48 crc kubenswrapper[4787]: I0129 13:18:48.350222 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0fcadf59-74fc-4aeb-abd6-55f6061fa5b0-metrics-certs\") pod \"network-metrics-daemon-gkrsx\" (UID: \"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0\") " pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:18:48 crc kubenswrapper[4787]: I0129 13:18:48.556934 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gkrsx" Jan 29 13:18:49 crc kubenswrapper[4787]: I0129 13:18:49.186486 4787 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-t64vw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 29 13:18:49 crc kubenswrapper[4787]: I0129 13:18:49.186585 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 29 13:18:49 crc kubenswrapper[4787]: I0129 13:18:49.231125 4787 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-4whcq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 29 13:18:49 crc kubenswrapper[4787]: I0129 13:18:49.231238 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.090650 4787 generic.go:334] "Generic (PLEG): container finished" podID="2431863b-8a4d-4897-a307-ed674bf53792" containerID="c391d1152110658bb71ef2bf4394e692ea92e8e1f01bb4e5c4de7ce883b3e7aa" exitCode=0 Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.090756 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" event={"ID":"2431863b-8a4d-4897-a307-ed674bf53792","Type":"ContainerDied","Data":"c391d1152110658bb71ef2bf4394e692ea92e8e1f01bb4e5c4de7ce883b3e7aa"} Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.095445 4787 generic.go:334] "Generic (PLEG): container finished" podID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerID="f8ab09b7ebf64e29a28dca80eab27e110baae6645e187333f0882ba271d307fa" exitCode=0 Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.095529 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" event={"ID":"91f03958-0c07-4b90-bf24-697aa18e3ebd","Type":"ContainerDied","Data":"f8ab09b7ebf64e29a28dca80eab27e110baae6645e187333f0882ba271d307fa"} Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.284755 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.284810 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.284836 4787 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.284837 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.284905 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.285501 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.285511 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"f2d0566efe6c7342211238841e4914e43d6ad5c7a17daf2e976901af715b1eb6"} pod="openshift-console/downloads-7954f5f757-n8d4v" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.285579 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:18:50 crc kubenswrapper[4787]: I0129 13:18:50.285604 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" containerID="cri-o://f2d0566efe6c7342211238841e4914e43d6ad5c7a17daf2e976901af715b1eb6" gracePeriod=2 Jan 29 13:18:58 crc kubenswrapper[4787]: I0129 13:18:58.395496 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:18:58 crc kubenswrapper[4787]: I0129 13:18:58.396436 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:18:59 crc kubenswrapper[4787]: I0129 13:18:59.186891 4787 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-t64vw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 29 13:18:59 crc kubenswrapper[4787]: I0129 13:18:59.187018 4787 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 29 13:18:59 crc kubenswrapper[4787]: I0129 13:18:59.230669 4787 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-4whcq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 29 13:18:59 crc kubenswrapper[4787]: I0129 13:18:59.230755 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 29 13:19:00 crc kubenswrapper[4787]: I0129 13:19:00.285738 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:00 crc kubenswrapper[4787]: I0129 13:19:00.285858 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:00 crc kubenswrapper[4787]: I0129 13:19:00.798321 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-47xjm" Jan 29 13:19:03 crc kubenswrapper[4787]: I0129 13:19:03.824280 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.397607 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 13:19:08 crc kubenswrapper[4787]: E0129 13:19:08.398231 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01edeb2c-055a-4c37-af34-2f7229a3880e" containerName="pruner" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.398244 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="01edeb2c-055a-4c37-af34-2f7229a3880e" containerName="pruner" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.398360 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="01edeb2c-055a-4c37-af34-2f7229a3880e" containerName="pruner" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.398877 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.400932 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.402118 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.411940 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.478180 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.478567 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.580354 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.580565 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.580791 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.605050 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:08 crc kubenswrapper[4787]: I0129 13:19:08.727959 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.187775 4787 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-t64vw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.187976 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.231097 4787 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-4whcq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.231217 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.284107 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.284237 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.720932 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.728693 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.759006 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8bcdb7fcd-chksx"] Jan 29 13:19:10 crc kubenswrapper[4787]: E0129 13:19:10.759328 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerName="route-controller-manager" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.759345 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerName="route-controller-manager" Jan 29 13:19:10 crc kubenswrapper[4787]: E0129 13:19:10.759359 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.759366 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.759508 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" containerName="route-controller-manager" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.759522 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="2431863b-8a4d-4897-a307-ed674bf53792" containerName="controller-manager" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.760061 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813127 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2431863b-8a4d-4897-a307-ed674bf53792-serving-cert\") pod \"2431863b-8a4d-4897-a307-ed674bf53792\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813228 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-client-ca\") pod \"2431863b-8a4d-4897-a307-ed674bf53792\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813255 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knv76\" (UniqueName: \"kubernetes.io/projected/2431863b-8a4d-4897-a307-ed674bf53792-kube-api-access-knv76\") pod \"2431863b-8a4d-4897-a307-ed674bf53792\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813312 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-config\") pod \"91f03958-0c07-4b90-bf24-697aa18e3ebd\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813363 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-client-ca\") pod \"91f03958-0c07-4b90-bf24-697aa18e3ebd\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813399 4787 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-config\") pod \"2431863b-8a4d-4897-a307-ed674bf53792\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813589 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmmd6\" (UniqueName: \"kubernetes.io/projected/91f03958-0c07-4b90-bf24-697aa18e3ebd-kube-api-access-lmmd6\") pod \"91f03958-0c07-4b90-bf24-697aa18e3ebd\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813628 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-proxy-ca-bundles\") pod \"2431863b-8a4d-4897-a307-ed674bf53792\" (UID: \"2431863b-8a4d-4897-a307-ed674bf53792\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813682 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91f03958-0c07-4b90-bf24-697aa18e3ebd-serving-cert\") pod \"91f03958-0c07-4b90-bf24-697aa18e3ebd\" (UID: \"91f03958-0c07-4b90-bf24-697aa18e3ebd\") " Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.813988 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6crs2\" (UniqueName: \"kubernetes.io/projected/8a8076ca-17fd-4373-8891-1ea440b462b6-kube-api-access-6crs2\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.814118 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a8076ca-17fd-4373-8891-1ea440b462b6-serving-cert\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.814158 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-config\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.814197 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-proxy-ca-bundles\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.814195 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-client-ca" (OuterVolumeSpecName: "client-ca") pod "2431863b-8a4d-4897-a307-ed674bf53792" (UID: "2431863b-8a4d-4897-a307-ed674bf53792"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.814277 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-client-ca\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.814323 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.814681 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "2431863b-8a4d-4897-a307-ed674bf53792" (UID: "2431863b-8a4d-4897-a307-ed674bf53792"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.816975 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-client-ca" (OuterVolumeSpecName: "client-ca") pod "91f03958-0c07-4b90-bf24-697aa18e3ebd" (UID: "91f03958-0c07-4b90-bf24-697aa18e3ebd"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.817701 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2431863b-8a4d-4897-a307-ed674bf53792-kube-api-access-knv76" (OuterVolumeSpecName: "kube-api-access-knv76") pod "2431863b-8a4d-4897-a307-ed674bf53792" (UID: "2431863b-8a4d-4897-a307-ed674bf53792"). InnerVolumeSpecName "kube-api-access-knv76". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.817719 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-config" (OuterVolumeSpecName: "config") pod "91f03958-0c07-4b90-bf24-697aa18e3ebd" (UID: "91f03958-0c07-4b90-bf24-697aa18e3ebd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.817729 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2431863b-8a4d-4897-a307-ed674bf53792-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2431863b-8a4d-4897-a307-ed674bf53792" (UID: "2431863b-8a4d-4897-a307-ed674bf53792"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.818253 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-config" (OuterVolumeSpecName: "config") pod "2431863b-8a4d-4897-a307-ed674bf53792" (UID: "2431863b-8a4d-4897-a307-ed674bf53792"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.818527 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8bcdb7fcd-chksx"] Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.821726 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91f03958-0c07-4b90-bf24-697aa18e3ebd-kube-api-access-lmmd6" (OuterVolumeSpecName: "kube-api-access-lmmd6") pod "91f03958-0c07-4b90-bf24-697aa18e3ebd" (UID: "91f03958-0c07-4b90-bf24-697aa18e3ebd"). InnerVolumeSpecName "kube-api-access-lmmd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.824277 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91f03958-0c07-4b90-bf24-697aa18e3ebd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "91f03958-0c07-4b90-bf24-697aa18e3ebd" (UID: "91f03958-0c07-4b90-bf24-697aa18e3ebd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.915747 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a8076ca-17fd-4373-8891-1ea440b462b6-serving-cert\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.915838 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-config\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.915867 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-proxy-ca-bundles\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.915966 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-client-ca\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.916000 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6crs2\" (UniqueName: \"kubernetes.io/projected/8a8076ca-17fd-4373-8891-1ea440b462b6-kube-api-access-6crs2\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.916349 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.916366 4787 
reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/91f03958-0c07-4b90-bf24-697aa18e3ebd-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.916378 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.916389 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmmd6\" (UniqueName: \"kubernetes.io/projected/91f03958-0c07-4b90-bf24-697aa18e3ebd-kube-api-access-lmmd6\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.916402 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2431863b-8a4d-4897-a307-ed674bf53792-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.917870 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-proxy-ca-bundles\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.917878 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-config\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.916411 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91f03958-0c07-4b90-bf24-697aa18e3ebd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.917944 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2431863b-8a4d-4897-a307-ed674bf53792-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.917955 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knv76\" (UniqueName: \"kubernetes.io/projected/2431863b-8a4d-4897-a307-ed674bf53792-kube-api-access-knv76\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.920118 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a8076ca-17fd-4373-8891-1ea440b462b6-serving-cert\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.935688 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-client-ca\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:10 crc kubenswrapper[4787]: I0129 13:19:10.939923 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6crs2\" (UniqueName: \"kubernetes.io/projected/8a8076ca-17fd-4373-8891-1ea440b462b6-kube-api-access-6crs2\") pod \"controller-manager-8bcdb7fcd-chksx\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.151761 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.233823 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" event={"ID":"91f03958-0c07-4b90-bf24-697aa18e3ebd","Type":"ContainerDied","Data":"5c91fb7e11eeb81f3a494a78cd43aed8f3a7b730601aa1b47cf80a9ec0272185"} Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.233902 4787 scope.go:117] "RemoveContainer" containerID="f8ab09b7ebf64e29a28dca80eab27e110baae6645e187333f0882ba271d307fa" Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.234053 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw" Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.237742 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" event={"ID":"2431863b-8a4d-4897-a307-ed674bf53792","Type":"ContainerDied","Data":"b33ba00c21070b65802192d07bfebeafd6732a7047d776bd319e75a8610cffc2"} Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.237826 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4whcq" Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.286192 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"] Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.293349 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-t64vw"] Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.297360 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4whcq"] Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.301099 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4whcq"] Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.993128 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2431863b-8a4d-4897-a307-ed674bf53792" path="/var/lib/kubelet/pods/2431863b-8a4d-4897-a307-ed674bf53792/volumes" Jan 29 13:19:11 crc kubenswrapper[4787]: I0129 13:19:11.993688 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91f03958-0c07-4b90-bf24-697aa18e3ebd" path="/var/lib/kubelet/pods/91f03958-0c07-4b90-bf24-697aa18e3ebd/volumes" Jan 29 13:19:12 crc kubenswrapper[4787]: I0129 13:19:12.088121 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.520171 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx"] Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.521621 
4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.527310 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.528249 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.528267 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.528656 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.528847 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.528996 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.532993 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx"] Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.659612 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj2tr\" (UniqueName: \"kubernetes.io/projected/e05f7373-7245-46f6-ae2f-73bac6ebac37-kube-api-access-pj2tr\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.659712 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-config\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.659755 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-client-ca\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.659793 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e05f7373-7245-46f6-ae2f-73bac6ebac37-serving-cert\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.761078 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj2tr\" (UniqueName: 
\"kubernetes.io/projected/e05f7373-7245-46f6-ae2f-73bac6ebac37-kube-api-access-pj2tr\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.761185 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-config\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.761229 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-client-ca\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.761267 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e05f7373-7245-46f6-ae2f-73bac6ebac37-serving-cert\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.762965 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-client-ca\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.763136 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-config\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.770420 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e05f7373-7245-46f6-ae2f-73bac6ebac37-serving-cert\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.779394 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj2tr\" (UniqueName: \"kubernetes.io/projected/e05f7373-7245-46f6-ae2f-73bac6ebac37-kube-api-access-pj2tr\") pod \"route-controller-manager-b64bd8c5d-szccx\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:13 crc kubenswrapper[4787]: I0129 13:19:13.841316 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.007408 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.008963 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.013977 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.064351 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-var-lock\") pod \"installer-9-crc\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.064798 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3575cdac-d663-47d0-a7bb-7445da552f35-kube-api-access\") pod \"installer-9-crc\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.064928 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.166857 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-var-lock\") pod \"installer-9-crc\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.166919 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3575cdac-d663-47d0-a7bb-7445da552f35-kube-api-access\") pod \"installer-9-crc\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.166955 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.167054 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.167486 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-var-lock\") pod \"installer-9-crc\" (UID: 
\"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.191430 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3575cdac-d663-47d0-a7bb-7445da552f35-kube-api-access\") pod \"installer-9-crc\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.281546 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerID="f2d0566efe6c7342211238841e4914e43d6ad5c7a17daf2e976901af715b1eb6" exitCode=0 Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.281614 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-n8d4v" event={"ID":"f4fd6365-d36d-4da8-8722-c4a542dae2eb","Type":"ContainerDied","Data":"f2d0566efe6c7342211238841e4914e43d6ad5c7a17daf2e976901af715b1eb6"} Jan 29 13:19:14 crc kubenswrapper[4787]: I0129 13:19:14.340280 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:19:16 crc kubenswrapper[4787]: E0129 13:19:16.255532 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 29 13:19:16 crc kubenswrapper[4787]: E0129 13:19:16.255814 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vlwvh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-j7bsl_openshift-marketplace(5c3abece-22a6-44f4-9b3d-77ad9eed03b7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 13:19:16 crc kubenswrapper[4787]: E0129 13:19:16.257058 4787 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-j7bsl" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" Jan 29 13:19:16 crc kubenswrapper[4787]: E0129 13:19:16.802317 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea\": context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 29 13:19:16 crc kubenswrapper[4787]: E0129 13:19:16.802952 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lqvsf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-fzxtm_openshift-marketplace(df99e00f-9a78-454e-9f8e-5da684f374a1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea\": context canceled" logger="UnhandledError" Jan 29 13:19:16 crc kubenswrapper[4787]: E0129 13:19:16.804551 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea: Get \\\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:375463ce314e9870c2ef316f6ae8ec2bad821721d7dac5d2800db42bce264bea\\\": context canceled\"" pod="openshift-marketplace/redhat-operators-fzxtm" 
podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" Jan 29 13:19:18 crc kubenswrapper[4787]: E0129 13:19:18.092799 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-fzxtm" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" Jan 29 13:19:18 crc kubenswrapper[4787]: E0129 13:19:18.093342 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-j7bsl" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" Jan 29 13:19:18 crc kubenswrapper[4787]: E0129 13:19:18.378670 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 29 13:19:18 crc kubenswrapper[4787]: E0129 13:19:18.378924 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sgdjb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-wxvjz_openshift-marketplace(2236dfad-b7be-4375-9661-287dbeeec969): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 13:19:18 crc kubenswrapper[4787]: E0129 13:19:18.380121 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-wxvjz" podUID="2236dfad-b7be-4375-9661-287dbeeec969" Jan 29 13:19:19 crc kubenswrapper[4787]: E0129 13:19:19.232346 4787 log.go:32] "PullImage from image 
service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 29 13:19:19 crc kubenswrapper[4787]: E0129 13:19:19.233395 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2m2tn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-pww2l_openshift-marketplace(95ac4597-f6a6-4a47-8892-d5b556c3363e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 13:19:19 crc kubenswrapper[4787]: E0129 13:19:19.234858 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-pww2l" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" Jan 29 13:19:20 crc kubenswrapper[4787]: I0129 13:19:20.286206 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:20 crc kubenswrapper[4787]: I0129 13:19:20.286804 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:21 crc kubenswrapper[4787]: E0129 13:19:21.714325 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/community-operators-pww2l" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" Jan 29 13:19:22 crc kubenswrapper[4787]: E0129 13:19:22.182119 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 29 13:19:22 crc kubenswrapper[4787]: E0129 13:19:22.183966 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5kbxs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-87tmc_openshift-marketplace(d376a31e-47be-4275-a440-5a961fb875d3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 13:19:22 crc kubenswrapper[4787]: E0129 13:19:22.185202 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-87tmc" podUID="d376a31e-47be-4275-a440-5a961fb875d3" Jan 29 13:19:23 crc kubenswrapper[4787]: E0129 13:19:23.785221 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-87tmc" podUID="d376a31e-47be-4275-a440-5a961fb875d3" Jan 29 13:19:26 crc kubenswrapper[4787]: I0129 13:19:26.080044 4787 scope.go:117] "RemoveContainer" containerID="c391d1152110658bb71ef2bf4394e692ea92e8e1f01bb4e5c4de7ce883b3e7aa" Jan 29 13:19:26 crc kubenswrapper[4787]: I0129 13:19:26.150602 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-gkrsx"] Jan 29 13:19:26 crc kubenswrapper[4787]: W0129 13:19:26.163742 4787 manager.go:1169] Failed to process 
watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0fcadf59_74fc_4aeb_abd6_55f6061fa5b0.slice/crio-b3c57146f32e59ccc6c8e9360e3e5b692d31273749d44ee088c6f4a5d16c7f73 WatchSource:0}: Error finding container b3c57146f32e59ccc6c8e9360e3e5b692d31273749d44ee088c6f4a5d16c7f73: Status 404 returned error can't find the container with id b3c57146f32e59ccc6c8e9360e3e5b692d31273749d44ee088c6f4a5d16c7f73 Jan 29 13:19:26 crc kubenswrapper[4787]: I0129 13:19:26.366898 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" event={"ID":"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0","Type":"ContainerStarted","Data":"b3c57146f32e59ccc6c8e9360e3e5b692d31273749d44ee088c6f4a5d16c7f73"} Jan 29 13:19:26 crc kubenswrapper[4787]: E0129 13:19:26.443858 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 29 13:19:26 crc kubenswrapper[4787]: E0129 13:19:26.444097 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hsqb5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-xp748_openshift-marketplace(52138722-381d-473d-85ab-f4961a18819c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 13:19:26 crc kubenswrapper[4787]: E0129 13:19:26.445415 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-xp748" podUID="52138722-381d-473d-85ab-f4961a18819c" Jan 29 13:19:26 crc kubenswrapper[4787]: I0129 13:19:26.563479 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 29 13:19:26 crc kubenswrapper[4787]: I0129 13:19:26.581286 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8bcdb7fcd-chksx"] Jan 29 13:19:26 crc kubenswrapper[4787]: W0129 13:19:26.592013 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a8076ca_17fd_4373_8891_1ea440b462b6.slice/crio-4378221d84b50d75ed45f693fece056ed53e52d2b7249ab786cecc5d6e8d457b WatchSource:0}: Error finding container 4378221d84b50d75ed45f693fece056ed53e52d2b7249ab786cecc5d6e8d457b: Status 404 returned error can't find the container with id 4378221d84b50d75ed45f693fece056ed53e52d2b7249ab786cecc5d6e8d457b Jan 29 13:19:26 crc kubenswrapper[4787]: I0129 13:19:26.634971 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 29 13:19:26 crc kubenswrapper[4787]: I0129 13:19:26.644320 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx"] Jan 29 13:19:27 crc kubenswrapper[4787]: I0129 13:19:27.375289 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" event={"ID":"e05f7373-7245-46f6-ae2f-73bac6ebac37","Type":"ContainerStarted","Data":"db44dce8eb960be685ed89cdc76e6cd360be7e5a8399e4a23045c77c5a2d0efb"} Jan 29 13:19:27 crc kubenswrapper[4787]: I0129 13:19:27.377245 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" event={"ID":"8a8076ca-17fd-4373-8891-1ea440b462b6","Type":"ContainerStarted","Data":"4378221d84b50d75ed45f693fece056ed53e52d2b7249ab786cecc5d6e8d457b"} Jan 29 13:19:27 crc kubenswrapper[4787]: I0129 13:19:27.379332 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3575cdac-d663-47d0-a7bb-7445da552f35","Type":"ContainerStarted","Data":"2fb520563eaaf96067da50b7f33eced69194ab15138afa895aa5210d89403e82"} Jan 29 13:19:27 crc kubenswrapper[4787]: I0129 13:19:27.381910 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2","Type":"ContainerStarted","Data":"234428b0e5bb43c2216f6ee4f9022675b049567b0392b0020525198564cb3937"} Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.392642 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-n8d4v" event={"ID":"f4fd6365-d36d-4da8-8722-c4a542dae2eb","Type":"ContainerStarted","Data":"9fa4598cb6901996ad35b4ddc2b2aaa15cf214965c9a021077d77ca98d9973a8"} Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.394776 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.394834 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 
29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.395554 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.396343 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.396423 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef" gracePeriod=600 Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.396750 4787 generic.go:334] "Generic (PLEG): container finished" podID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerID="7ce8cf61b259780a2db99500dfd133f86ed831cc4d2091572b57c52b88af1268" exitCode=0 Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.396835 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n44gv" event={"ID":"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b","Type":"ContainerDied","Data":"7ce8cf61b259780a2db99500dfd133f86ed831cc4d2091572b57c52b88af1268"} Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.401415 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2","Type":"ContainerStarted","Data":"abc92ee689ea8d1eb3d3ce68b84a1fbaddfbfbb49a15b3bf18255755fa6ff80b"} Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.406976 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" event={"ID":"e05f7373-7245-46f6-ae2f-73bac6ebac37","Type":"ContainerStarted","Data":"ad7ca122394636ac574a2f689d0e1331922e82de15a5c35389e5f913f28fdc55"} Jan 29 13:19:28 crc kubenswrapper[4787]: I0129 13:19:28.408984 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" event={"ID":"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0","Type":"ContainerStarted","Data":"3760bd661dfaca036d9a0eae4192ff484e8bef7e25259026fdd989278d176ee0"} Jan 29 13:19:29 crc kubenswrapper[4787]: I0129 13:19:29.443201 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" event={"ID":"8a8076ca-17fd-4373-8891-1ea440b462b6","Type":"ContainerStarted","Data":"b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb"} Jan 29 13:19:29 crc kubenswrapper[4787]: I0129 13:19:29.444352 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3575cdac-d663-47d0-a7bb-7445da552f35","Type":"ContainerStarted","Data":"d95b61ae6f8feaaceebfc85a6eeefcfa46f6c7243ae5588056497bdfbe1d8213"} Jan 29 13:19:29 crc kubenswrapper[4787]: I0129 13:19:29.444924 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:19:29 crc kubenswrapper[4787]: I0129 13:19:29.445417 4787 patch_prober.go:28] interesting 
pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:29 crc kubenswrapper[4787]: I0129 13:19:29.445540 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:30 crc kubenswrapper[4787]: I0129 13:19:30.283971 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:30 crc kubenswrapper[4787]: I0129 13:19:30.284614 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:30 crc kubenswrapper[4787]: I0129 13:19:30.284090 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:30 crc kubenswrapper[4787]: I0129 13:19:30.284944 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:30 crc kubenswrapper[4787]: I0129 13:19:30.451257 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:30 crc kubenswrapper[4787]: I0129 13:19:30.451331 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.471733 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef" exitCode=0 Jan 29 13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.471818 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef"} Jan 29 13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.474646 4787 generic.go:334] "Generic (PLEG): container finished" podID="a2263bc8-b1a0-4db5-8fe2-3dc0871154c2" containerID="abc92ee689ea8d1eb3d3ce68b84a1fbaddfbfbb49a15b3bf18255755fa6ff80b" exitCode=0 Jan 29 
13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.474768 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2","Type":"ContainerDied","Data":"abc92ee689ea8d1eb3d3ce68b84a1fbaddfbfbb49a15b3bf18255755fa6ff80b"} Jan 29 13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.475178 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.490805 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.522725 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" podStartSLOduration=24.522703716 podStartE2EDuration="24.522703716s" podCreationTimestamp="2026-01-29 13:19:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:19:32.501955214 +0000 UTC m=+211.263215510" watchObservedRunningTime="2026-01-29 13:19:32.522703716 +0000 UTC m=+211.283963992" Jan 29 13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.546001 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=19.54594372 podStartE2EDuration="19.54594372s" podCreationTimestamp="2026-01-29 13:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:19:32.524105712 +0000 UTC m=+211.285365998" watchObservedRunningTime="2026-01-29 13:19:32.54594372 +0000 UTC m=+211.307203996" Jan 29 13:19:32 crc kubenswrapper[4787]: I0129 13:19:32.547259 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" podStartSLOduration=24.547251103 podStartE2EDuration="24.547251103s" podCreationTimestamp="2026-01-29 13:19:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:19:32.544865234 +0000 UTC m=+211.306125520" watchObservedRunningTime="2026-01-29 13:19:32.547251103 +0000 UTC m=+211.308511379" Jan 29 13:19:32 crc kubenswrapper[4787]: E0129 13:19:32.564341 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 29 13:19:32 crc kubenswrapper[4787]: E0129 13:19:32.564625 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fnl7g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-z6slf_openshift-marketplace(60be26cc-9957-4401-85dd-7572bb78975f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 13:19:32 crc kubenswrapper[4787]: E0129 13:19:32.565926 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-z6slf" podUID="60be26cc-9957-4401-85dd-7572bb78975f" Jan 29 13:19:33 crc kubenswrapper[4787]: I0129 13:19:33.827428 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:33 crc kubenswrapper[4787]: I0129 13:19:33.934610 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kubelet-dir\") pod \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\" (UID: \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\") " Jan 29 13:19:33 crc kubenswrapper[4787]: I0129 13:19:33.934801 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a2263bc8-b1a0-4db5-8fe2-3dc0871154c2" (UID: "a2263bc8-b1a0-4db5-8fe2-3dc0871154c2"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:19:33 crc kubenswrapper[4787]: I0129 13:19:33.935579 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kube-api-access\") pod \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\" (UID: \"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2\") " Jan 29 13:19:33 crc kubenswrapper[4787]: I0129 13:19:33.936105 4787 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:33 crc kubenswrapper[4787]: I0129 13:19:33.949433 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a2263bc8-b1a0-4db5-8fe2-3dc0871154c2" (UID: "a2263bc8-b1a0-4db5-8fe2-3dc0871154c2"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:19:34 crc kubenswrapper[4787]: I0129 13:19:34.038063 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a2263bc8-b1a0-4db5-8fe2-3dc0871154c2-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 13:19:34 crc kubenswrapper[4787]: I0129 13:19:34.489898 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a2263bc8-b1a0-4db5-8fe2-3dc0871154c2","Type":"ContainerDied","Data":"234428b0e5bb43c2216f6ee4f9022675b049567b0392b0020525198564cb3937"} Jan 29 13:19:34 crc kubenswrapper[4787]: I0129 13:19:34.489987 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="234428b0e5bb43c2216f6ee4f9022675b049567b0392b0020525198564cb3937" Jan 29 13:19:34 crc kubenswrapper[4787]: I0129 13:19:34.489936 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 29 13:19:34 crc kubenswrapper[4787]: I0129 13:19:34.491950 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gkrsx" event={"ID":"0fcadf59-74fc-4aeb-abd6-55f6061fa5b0","Type":"ContainerStarted","Data":"d0f9ecd7d7645ab1beffb9a120672e002e54e4afbb360373b6c7f4df2599aa7c"} Jan 29 13:19:35 crc kubenswrapper[4787]: I0129 13:19:35.525183 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-gkrsx" podStartSLOduration=190.525156859 podStartE2EDuration="3m10.525156859s" podCreationTimestamp="2026-01-29 13:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:19:35.518349595 +0000 UTC m=+214.279609871" watchObservedRunningTime="2026-01-29 13:19:35.525156859 +0000 UTC m=+214.286417155" Jan 29 13:19:36 crc kubenswrapper[4787]: I0129 13:19:36.515551 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"7ba8854d1c9db1088e3ba2267b52c1d63c0e87ec784685a87fb7723189cff447"} Jan 29 13:19:40 crc kubenswrapper[4787]: I0129 13:19:40.284919 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:40 crc kubenswrapper[4787]: I0129 13:19:40.286747 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:40 crc kubenswrapper[4787]: I0129 13:19:40.284962 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:40 crc kubenswrapper[4787]: I0129 13:19:40.286919 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:41 crc kubenswrapper[4787]: I0129 13:19:41.152545 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:41 crc kubenswrapper[4787]: I0129 13:19:41.159669 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.284375 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.285385 4787 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.285439 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.284720 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.285920 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.286159 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"9fa4598cb6901996ad35b4ddc2b2aaa15cf214965c9a021077d77ca98d9973a8"} pod="openshift-console/downloads-7954f5f757-n8d4v" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.286202 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" containerID="cri-o://9fa4598cb6901996ad35b4ddc2b2aaa15cf214965c9a021077d77ca98d9973a8" gracePeriod=2 Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.286277 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:19:50 crc kubenswrapper[4787]: I0129 13:19:50.286314 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:19:51 crc kubenswrapper[4787]: I0129 13:19:51.731505 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerID="9fa4598cb6901996ad35b4ddc2b2aaa15cf214965c9a021077d77ca98d9973a8" exitCode=0 Jan 29 13:19:51 crc kubenswrapper[4787]: I0129 13:19:51.731659 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-n8d4v" event={"ID":"f4fd6365-d36d-4da8-8722-c4a542dae2eb","Type":"ContainerDied","Data":"9fa4598cb6901996ad35b4ddc2b2aaa15cf214965c9a021077d77ca98d9973a8"} Jan 29 13:19:51 crc kubenswrapper[4787]: I0129 13:19:51.731922 4787 scope.go:117] "RemoveContainer" containerID="f2d0566efe6c7342211238841e4914e43d6ad5c7a17daf2e976901af715b1eb6" Jan 29 13:20:00 crc kubenswrapper[4787]: I0129 13:20:00.284677 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server 
namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:20:00 crc kubenswrapper[4787]: I0129 13:20:00.285705 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.232716 4787 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.234646 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619" gracePeriod=15 Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.234687 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc" gracePeriod=15 Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.234739 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44" gracePeriod=15 Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.234616 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178" gracePeriod=15 Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.234647 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5" gracePeriod=15 Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.236718 4787 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.237138 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.237238 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.237321 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.237400 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" 
Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.237499 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.237584 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.237668 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.237743 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.237824 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2263bc8-b1a0-4db5-8fe2-3dc0871154c2" containerName="pruner" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.237938 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2263bc8-b1a0-4db5-8fe2-3dc0871154c2" containerName="pruner" Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.238027 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.238144 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.238233 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.238309 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.238387 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.238473 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.238708 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.238794 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.238887 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.238968 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.239048 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc 
kubenswrapper[4787]: I0129 13:20:06.239126 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2263bc8-b1a0-4db5-8fe2-3dc0871154c2" containerName="pruner" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.239200 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.239277 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.239495 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.239589 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.244070 4787 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.245110 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.251365 4787 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 29 13:20:06 crc kubenswrapper[4787]: E0129 13:20:06.297053 4787 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.203:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.345219 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.345325 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.345375 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.345468 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" 
(UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.345506 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.345538 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.345564 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.345602 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447337 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447410 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447446 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447492 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447528 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447559 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447596 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447559 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447618 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447645 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447659 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447734 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447799 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447845 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447953 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.447988 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:06 crc kubenswrapper[4787]: I0129 13:20:06.598429 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:07 crc kubenswrapper[4787]: I0129 13:20:07.869447 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 13:20:07 crc kubenswrapper[4787]: I0129 13:20:07.871002 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 13:20:07 crc kubenswrapper[4787]: I0129 13:20:07.872043 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc" exitCode=2 Jan 29 13:20:08 crc kubenswrapper[4787]: E0129 13:20:08.362619 4787 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:08 crc kubenswrapper[4787]: E0129 13:20:08.376320 4787 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:08 crc kubenswrapper[4787]: E0129 13:20:08.376644 4787 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:08 crc kubenswrapper[4787]: E0129 13:20:08.377989 4787 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:08 crc kubenswrapper[4787]: E0129 13:20:08.378371 4787 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:08 crc kubenswrapper[4787]: I0129 13:20:08.378442 4787 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 29 13:20:08 crc kubenswrapper[4787]: E0129 13:20:08.378829 4787 
controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="200ms" Jan 29 13:20:08 crc kubenswrapper[4787]: E0129 13:20:08.581187 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="400ms" Jan 29 13:20:08 crc kubenswrapper[4787]: E0129 13:20:08.982945 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="800ms" Jan 29 13:20:09 crc kubenswrapper[4787]: E0129 13:20:09.783381 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="1.6s" Jan 29 13:20:09 crc kubenswrapper[4787]: I0129 13:20:09.887691 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 13:20:09 crc kubenswrapper[4787]: I0129 13:20:09.889530 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 13:20:09 crc kubenswrapper[4787]: I0129 13:20:09.890244 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619" exitCode=0 Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.284560 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.286448 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.901575 4787 generic.go:334] "Generic (PLEG): container finished" podID="3575cdac-d663-47d0-a7bb-7445da552f35" containerID="d95b61ae6f8feaaceebfc85a6eeefcfa46f6c7243ae5588056497bdfbe1d8213" exitCode=0 Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.901710 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3575cdac-d663-47d0-a7bb-7445da552f35","Type":"ContainerDied","Data":"d95b61ae6f8feaaceebfc85a6eeefcfa46f6c7243ae5588056497bdfbe1d8213"} Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.903593 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.905969 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.908488 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.910129 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44" exitCode=0 Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.910174 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5" exitCode=0 Jan 29 13:20:10 crc kubenswrapper[4787]: I0129 13:20:10.910191 4787 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178" exitCode=0 Jan 29 13:20:11 crc kubenswrapper[4787]: E0129 13:20:11.385095 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="3.2s" Jan 29 13:20:11 crc kubenswrapper[4787]: I0129 13:20:11.989587 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:14 crc kubenswrapper[4787]: E0129 13:20:14.586388 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="6.4s" Jan 29 13:20:15 crc kubenswrapper[4787]: E0129 13:20:15.370490 4787 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.203:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-n44gv.188f363d45a8d350 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-n44gv,UID:4f791725-08e7-42f5-b0ee-cd67dfc1fc1b,APIVersion:v1,ResourceVersion:28874,FieldPath:spec.containers{registry-server},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\" in 45.922s (45.922s including waiting). 
Image size: 907837715 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 13:20:15.36949128 +0000 UTC m=+254.130751546,LastTimestamp:2026-01-29 13:20:15.36949128 +0000 UTC m=+254.130751546,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 13:20:15 crc kubenswrapper[4787]: E0129 13:20:15.389672 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 29 13:20:15 crc kubenswrapper[4787]: E0129 13:20:15.389922 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5kbxs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-87tmc_openshift-marketplace(d376a31e-47be-4275-a440-5a961fb875d3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 29 13:20:15 crc kubenswrapper[4787]: E0129 13:20:15.391076 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-87tmc" podUID="d376a31e-47be-4275-a440-5a961fb875d3" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.462911 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.463770 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.631813 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-var-lock\") pod \"3575cdac-d663-47d0-a7bb-7445da552f35\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.631961 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-var-lock" (OuterVolumeSpecName: "var-lock") pod "3575cdac-d663-47d0-a7bb-7445da552f35" (UID: "3575cdac-d663-47d0-a7bb-7445da552f35"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.631972 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-kubelet-dir\") pod \"3575cdac-d663-47d0-a7bb-7445da552f35\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.632019 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3575cdac-d663-47d0-a7bb-7445da552f35" (UID: "3575cdac-d663-47d0-a7bb-7445da552f35"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.632129 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3575cdac-d663-47d0-a7bb-7445da552f35-kube-api-access\") pod \"3575cdac-d663-47d0-a7bb-7445da552f35\" (UID: \"3575cdac-d663-47d0-a7bb-7445da552f35\") " Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.632367 4787 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-var-lock\") on node \"crc\" DevicePath \"\"" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.632386 4787 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3575cdac-d663-47d0-a7bb-7445da552f35-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.642571 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3575cdac-d663-47d0-a7bb-7445da552f35-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3575cdac-d663-47d0-a7bb-7445da552f35" (UID: "3575cdac-d663-47d0-a7bb-7445da552f35"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.733890 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3575cdac-d663-47d0-a7bb-7445da552f35-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.952150 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3575cdac-d663-47d0-a7bb-7445da552f35","Type":"ContainerDied","Data":"2fb520563eaaf96067da50b7f33eced69194ab15138afa895aa5210d89403e82"} Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.952214 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2fb520563eaaf96067da50b7f33eced69194ab15138afa895aa5210d89403e82" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.952352 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 29 13:20:15 crc kubenswrapper[4787]: I0129 13:20:15.972335 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.049083 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.051556 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.052819 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.053762 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.054531 4787 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.169347 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.169564 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.169677 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.169701 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.169788 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.170274 4787 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.170305 4787 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.172085 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.172517 4787 scope.go:117] "RemoveContainer" containerID="49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.271412 4787 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.987539 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.989323 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:18 crc kubenswrapper[4787]: I0129 13:20:18.990188 4787 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:19 crc kubenswrapper[4787]: I0129 13:20:19.006878 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:19 crc kubenswrapper[4787]: I0129 13:20:19.007557 4787 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:19 crc kubenswrapper[4787]: E0129 13:20:19.747013 4787 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.203:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-n44gv.188f363d45a8d350 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-n44gv,UID:4f791725-08e7-42f5-b0ee-cd67dfc1fc1b,APIVersion:v1,ResourceVersion:28874,FieldPath:spec.containers{registry-server},},Reason:Pulled,Message:Successfully pulled image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\" in 45.922s (45.922s including waiting). 
Image size: 907837715 bytes.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-29 13:20:15.36949128 +0000 UTC m=+254.130751546,LastTimestamp:2026-01-29 13:20:15.36949128 +0000 UTC m=+254.130751546,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 29 13:20:20 crc kubenswrapper[4787]: I0129 13:20:20.002043 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 29 13:20:20 crc kubenswrapper[4787]: I0129 13:20:20.172686 4787 scope.go:117] "RemoveContainer" containerID="5bf7bcd21d354c603b5075f0ee133048c78a3c3d4a623576c6d5646cea24c619" Jan 29 13:20:20 crc kubenswrapper[4787]: I0129 13:20:20.284692 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:20:20 crc kubenswrapper[4787]: I0129 13:20:20.284788 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:20:20 crc kubenswrapper[4787]: I0129 13:20:20.985634 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:20 crc kubenswrapper[4787]: E0129 13:20:20.987728 4787 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.203:6443: connect: connection refused" interval="7s" Jan 29 13:20:20 crc kubenswrapper[4787]: I0129 13:20:20.988177 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:21 crc kubenswrapper[4787]: I0129 13:20:21.010866 4787 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:21 crc kubenswrapper[4787]: I0129 13:20:21.010929 4787 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:21 crc kubenswrapper[4787]: E0129 13:20:21.011809 4787 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:21 crc kubenswrapper[4787]: I0129 13:20:21.012599 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:21 crc kubenswrapper[4787]: I0129 13:20:21.995655 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:21 crc kubenswrapper[4787]: I0129 13:20:21.997484 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.019318 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.019587 4787 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7" exitCode=1 Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.019721 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7"} Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.020321 4787 scope.go:117] "RemoveContainer" containerID="b796cee5406d226ad67f9feaf380a4469bf021d227b1e9475da658f89c7895f7" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.021364 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.022390 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.022818 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.868416 4787 scope.go:117] "RemoveContainer" containerID="49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc" Jan 29 13:20:22 crc kubenswrapper[4787]: E0129 13:20:22.869732 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\": container with ID starting with 
49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc not found: ID does not exist" containerID="49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.869783 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc"} err="failed to get container status \"49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\": rpc error: code = NotFound desc = could not find container \"49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc\": container with ID starting with 49e0199545b30b7a859e96328baf886ba057db84d03f864ce139c5d0e91df6fc not found: ID does not exist" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.869816 4787 scope.go:117] "RemoveContainer" containerID="7674ba8b2646985ace80811cb55606c1a05f0da4731147028b786455a7b84f44" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.923127 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:20:22 crc kubenswrapper[4787]: I0129 13:20:22.996429 4787 scope.go:117] "RemoveContainer" containerID="bf0cfcaaf9b687f73b8ce747d6cb46b7c05d4380f89c12d7a0e5137f8ea4a3a5" Jan 29 13:20:23 crc kubenswrapper[4787]: I0129 13:20:23.006556 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:20:23 crc kubenswrapper[4787]: W0129 13:20:23.011636 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-877821842b04d5ecebb3763e9c3eb9e2a876e978cff730dcb23a8867323f963f WatchSource:0}: Error finding container 877821842b04d5ecebb3763e9c3eb9e2a876e978cff730dcb23a8867323f963f: Status 404 returned error can't find the container with id 877821842b04d5ecebb3763e9c3eb9e2a876e978cff730dcb23a8867323f963f Jan 29 13:20:23 crc kubenswrapper[4787]: I0129 13:20:23.033572 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"7782e3cd0bc72ff506dd579d842c76cbe59712892463460e275f24b05e452c7a"} Jan 29 13:20:23 crc kubenswrapper[4787]: I0129 13:20:23.036213 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"877821842b04d5ecebb3763e9c3eb9e2a876e978cff730dcb23a8867323f963f"} Jan 29 13:20:23 crc kubenswrapper[4787]: I0129 13:20:23.039134 4787 scope.go:117] "RemoveContainer" containerID="c7b4f85f5728ed4b844c45a9359691ba7b6d120f7f52d0e8e4b1d14aaccbf3fc" Jan 29 13:20:23 crc kubenswrapper[4787]: I0129 13:20:23.079939 4787 scope.go:117] "RemoveContainer" containerID="545226279c665c04dae521ee4483efdd295ca53b23bc0203017b9eb0a261a178" Jan 29 13:20:23 crc kubenswrapper[4787]: I0129 13:20:23.135394 4787 scope.go:117] "RemoveContainer" containerID="de11bdd02639502a69c1c0b2bb9bcc9d8d19b70f718efb1bfd3b5943cf541de9" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.056315 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pww2l" 
event={"ID":"95ac4597-f6a6-4a47-8892-d5b556c3363e","Type":"ContainerDied","Data":"c8b8bfb5c8dec79986322d62ed54e8949a3788c19636f158c523e0030b7e765c"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.056202 4787 generic.go:334] "Generic (PLEG): container finished" podID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerID="c8b8bfb5c8dec79986322d62ed54e8949a3788c19636f158c523e0030b7e765c" exitCode=0 Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.058616 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.058892 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.059321 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.059733 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.061094 4787 generic.go:334] "Generic (PLEG): container finished" podID="60be26cc-9957-4401-85dd-7572bb78975f" containerID="f5e50636ebcbeb34640eebd648a6745b756f16711d3f83fbfe4a49d63175212b" exitCode=0 Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.061161 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6slf" event={"ID":"60be26cc-9957-4401-85dd-7572bb78975f","Type":"ContainerDied","Data":"f5e50636ebcbeb34640eebd648a6745b756f16711d3f83fbfe4a49d63175212b"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.062134 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.062886 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.063306 4787 status_manager.go:851] "Failed to get status for pod" 
podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.063833 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.064663 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.067137 4787 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="42db3584a966e0b332ae64be08aee3d7c98e73b07428c37be24104da6ecd9296" exitCode=0 Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.067190 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"42db3584a966e0b332ae64be08aee3d7c98e73b07428c37be24104da6ecd9296"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.067472 4787 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.067492 4787 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.068078 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: E0129 13:20:24.068181 4787 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.068258 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.068407 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: 
connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.068573 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.068727 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.070802 4787 generic.go:334] "Generic (PLEG): container finished" podID="2236dfad-b7be-4375-9661-287dbeeec969" containerID="8250f58a4cc6bcae572f251756c9d2615dd1191813b96fd258e3f9e127fd3220" exitCode=0 Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.070915 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxvjz" event={"ID":"2236dfad-b7be-4375-9661-287dbeeec969","Type":"ContainerDied","Data":"8250f58a4cc6bcae572f251756c9d2615dd1191813b96fd258e3f9e127fd3220"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.072376 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.073157 4787 status_manager.go:851] "Failed to get status for pod" podUID="2236dfad-b7be-4375-9661-287dbeeec969" pod="openshift-marketplace/community-operators-wxvjz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-wxvjz\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.073725 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.074105 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.074777 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.075945 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" 
pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.076254 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n44gv" event={"ID":"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b","Type":"ContainerStarted","Data":"29e282650465855593933e61ba2cb1dc7b8fb2cfab016f1ce5e3e6347bd65b64"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.077183 4787 status_manager.go:851] "Failed to get status for pod" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" pod="openshift-marketplace/redhat-marketplace-n44gv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n44gv\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.077373 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.077556 4787 status_manager.go:851] "Failed to get status for pod" podUID="2236dfad-b7be-4375-9661-287dbeeec969" pod="openshift-marketplace/community-operators-wxvjz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-wxvjz\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.078561 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.079020 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.079627 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.079934 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.084286 4787 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.084531 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0967b33dcc1bb62472171bd07ed7318c0145d1c14080955fca5bd540315f806b"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.085529 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.085795 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.086217 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.086420 4787 status_manager.go:851] "Failed to get status for pod" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" pod="openshift-marketplace/redhat-marketplace-n44gv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n44gv\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.086652 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.086817 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.086968 4787 status_manager.go:851] "Failed to get status for pod" podUID="2236dfad-b7be-4375-9661-287dbeeec969" pod="openshift-marketplace/community-operators-wxvjz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-wxvjz\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.091226 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzxtm" 
event={"ID":"df99e00f-9a78-454e-9f8e-5da684f374a1","Type":"ContainerStarted","Data":"8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.092573 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.092765 4787 status_manager.go:851] "Failed to get status for pod" podUID="2236dfad-b7be-4375-9661-287dbeeec969" pod="openshift-marketplace/community-operators-wxvjz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-wxvjz\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.092971 4787 status_manager.go:851] "Failed to get status for pod" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" pod="openshift-marketplace/redhat-operators-fzxtm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-fzxtm\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.093136 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.093299 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.093560 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.093986 4787 status_manager.go:851] "Failed to get status for pod" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" pod="openshift-marketplace/redhat-marketplace-n44gv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n44gv\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.094191 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.095836 4787 generic.go:334] "Generic (PLEG): container finished" podID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" 
containerID="46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080" exitCode=0 Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.095916 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7bsl" event={"ID":"5c3abece-22a6-44f4-9b3d-77ad9eed03b7","Type":"ContainerDied","Data":"46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.097269 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.097571 4787 status_manager.go:851] "Failed to get status for pod" podUID="2236dfad-b7be-4375-9661-287dbeeec969" pod="openshift-marketplace/community-operators-wxvjz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-wxvjz\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.097863 4787 status_manager.go:851] "Failed to get status for pod" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" pod="openshift-marketplace/redhat-operators-fzxtm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-fzxtm\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.098132 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.098381 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.098725 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.098887 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"f6550a6b39a1dd7439664de0cbb81b01b81f30ad40cfda49d8efe775f0cf1416"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.098982 4787 status_manager.go:851] "Failed to get status for pod" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" pod="openshift-marketplace/redhat-marketplace-j7bsl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j7bsl\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc 
kubenswrapper[4787]: I0129 13:20:24.099183 4787 status_manager.go:851] "Failed to get status for pod" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" pod="openshift-marketplace/redhat-marketplace-n44gv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n44gv\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: E0129 13:20:24.099317 4787 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.203:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.099371 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.099628 4787 status_manager.go:851] "Failed to get status for pod" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" pod="openshift-marketplace/redhat-marketplace-n44gv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n44gv\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.099872 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.100172 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.100392 4787 status_manager.go:851] "Failed to get status for pod" podUID="2236dfad-b7be-4375-9661-287dbeeec969" pod="openshift-marketplace/community-operators-wxvjz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-wxvjz\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.100670 4787 status_manager.go:851] "Failed to get status for pod" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" pod="openshift-marketplace/redhat-operators-fzxtm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-fzxtm\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.100871 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc 
kubenswrapper[4787]: I0129 13:20:24.101136 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.101370 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.101625 4787 status_manager.go:851] "Failed to get status for pod" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" pod="openshift-marketplace/redhat-marketplace-j7bsl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j7bsl\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.101734 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-n8d4v" event={"ID":"f4fd6365-d36d-4da8-8722-c4a542dae2eb","Type":"ContainerStarted","Data":"2977305cea25fce1e626af38787ba749792a71ac66847378d84f16b8c37c4544"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.102272 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.102334 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.102364 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.102491 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.103285 4787 status_manager.go:851] "Failed to get status for pod" podUID="2236dfad-b7be-4375-9661-287dbeeec969" pod="openshift-marketplace/community-operators-wxvjz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-wxvjz\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.103583 4787 status_manager.go:851] "Failed to get status for pod" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" pod="openshift-marketplace/redhat-operators-fzxtm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-fzxtm\": dial tcp 38.102.83.203:6443: connect: connection 
refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.103792 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.103896 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp748" event={"ID":"52138722-381d-473d-85ab-f4961a18819c","Type":"ContainerDied","Data":"51bc985f987d9fb5e30f8747bd319582deffda8c49afb8095b2f6b0b37abcf3d"} Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.103804 4787 generic.go:334] "Generic (PLEG): container finished" podID="52138722-381d-473d-85ab-f4961a18819c" containerID="51bc985f987d9fb5e30f8747bd319582deffda8c49afb8095b2f6b0b37abcf3d" exitCode=0 Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.103978 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.104367 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.104803 4787 status_manager.go:851] "Failed to get status for pod" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" pod="openshift-marketplace/redhat-marketplace-j7bsl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j7bsl\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.105109 4787 status_manager.go:851] "Failed to get status for pod" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" pod="openshift-marketplace/redhat-marketplace-n44gv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n44gv\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.106075 4787 status_manager.go:851] "Failed to get status for pod" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" pod="openshift-console/downloads-7954f5f757-n8d4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-n8d4v\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.106404 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.106759 4787 status_manager.go:851] "Failed to get status for pod" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" 
pod="openshift-marketplace/community-operators-pww2l" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-pww2l\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.108202 4787 status_manager.go:851] "Failed to get status for pod" podUID="52138722-381d-473d-85ab-f4961a18819c" pod="openshift-marketplace/certified-operators-xp748" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-xp748\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.108627 4787 status_manager.go:851] "Failed to get status for pod" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.108910 4787 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.109114 4787 status_manager.go:851] "Failed to get status for pod" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" pod="openshift-marketplace/redhat-marketplace-j7bsl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-j7bsl\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.109278 4787 status_manager.go:851] "Failed to get status for pod" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" pod="openshift-console/downloads-7954f5f757-n8d4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/pods/downloads-7954f5f757-n8d4v\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.109439 4787 status_manager.go:851] "Failed to get status for pod" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" pod="openshift-marketplace/redhat-marketplace-n44gv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n44gv\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.109631 4787 status_manager.go:851] "Failed to get status for pod" podUID="60be26cc-9957-4401-85dd-7572bb78975f" pod="openshift-marketplace/certified-operators-z6slf" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-z6slf\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.109856 4787 status_manager.go:851] "Failed to get status for pod" podUID="2236dfad-b7be-4375-9661-287dbeeec969" pod="openshift-marketplace/community-operators-wxvjz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-wxvjz\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.110243 4787 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:24 crc kubenswrapper[4787]: I0129 13:20:24.110575 4787 status_manager.go:851] "Failed to get status for pod" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" pod="openshift-marketplace/redhat-operators-fzxtm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-fzxtm\": dial tcp 38.102.83.203:6443: connect: connection refused" Jan 29 13:20:25 crc kubenswrapper[4787]: I0129 13:20:25.113124 4787 generic.go:334] "Generic (PLEG): container finished" podID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerID="8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1" exitCode=0 Jan 29 13:20:25 crc kubenswrapper[4787]: I0129 13:20:25.113884 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzxtm" event={"ID":"df99e00f-9a78-454e-9f8e-5da684f374a1","Type":"ContainerDied","Data":"8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1"} Jan 29 13:20:25 crc kubenswrapper[4787]: I0129 13:20:25.119330 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c06b863f6ab0cf0b34d1199113e0215dca487b4c97552acb10a007272188f0be"} Jan 29 13:20:25 crc kubenswrapper[4787]: I0129 13:20:25.119945 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:20:25 crc kubenswrapper[4787]: I0129 13:20:25.119991 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:20:26 crc kubenswrapper[4787]: I0129 13:20:26.127929 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b09613e4c9e2b9289e20482c6cf3d275e7fae10a1c3703a5bacd63e8d5a4776e"} Jan 29 13:20:26 crc kubenswrapper[4787]: I0129 13:20:26.128177 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:20:26 crc kubenswrapper[4787]: I0129 13:20:26.128440 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:20:27 crc kubenswrapper[4787]: I0129 13:20:27.136005 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8961cc6f9299cac4fdfdf585b6c469da603cd56fed3a71e0faa68b6fd79e56cd"} Jan 29 13:20:27 crc kubenswrapper[4787]: E0129 13:20:27.473404 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-87tmc" podUID="d376a31e-47be-4275-a440-5a961fb875d3" Jan 29 13:20:28 crc kubenswrapper[4787]: I0129 13:20:28.306415 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:20:28 crc kubenswrapper[4787]: I0129 13:20:28.306508 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:20:29 crc kubenswrapper[4787]: I0129 13:20:29.153949 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6slf" event={"ID":"60be26cc-9957-4401-85dd-7572bb78975f","Type":"ContainerStarted","Data":"70872f43e234762504fa14291d7bc5bd220dcd2843936b11aee5e0958022b4ce"} Jan 29 13:20:29 crc kubenswrapper[4787]: I0129 13:20:29.473630 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:20:29 crc kubenswrapper[4787]: I0129 13:20:29.524589 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:20:29 crc kubenswrapper[4787]: I0129 13:20:29.860059 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:20:29 crc kubenswrapper[4787]: I0129 13:20:29.869112 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:20:30 crc kubenswrapper[4787]: I0129 13:20:30.181901 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ea920fd7f64d494bbe09daa052fbd0c9e5e021de0193aeffac96a3a0c3e47593"} Jan 29 13:20:30 crc kubenswrapper[4787]: I0129 13:20:30.183245 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:20:30 crc kubenswrapper[4787]: I0129 13:20:30.284473 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Jan 29 13:20:30 crc kubenswrapper[4787]: I0129 13:20:30.284932 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:20:30 crc kubenswrapper[4787]: I0129 13:20:30.284839 4787 patch_prober.go:28] interesting pod/downloads-7954f5f757-n8d4v container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection 
refused" start-of-body= Jan 29 13:20:30 crc kubenswrapper[4787]: I0129 13:20:30.285122 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n8d4v" podUID="f4fd6365-d36d-4da8-8722-c4a542dae2eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Jan 29 13:20:34 crc kubenswrapper[4787]: I0129 13:20:34.212590 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0e391fd0f7410edcd2e4a3cc646ecf11c0d05725fb8c0674dac87f3524f959d8"} Jan 29 13:20:34 crc kubenswrapper[4787]: I0129 13:20:34.214811 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxvjz" event={"ID":"2236dfad-b7be-4375-9661-287dbeeec969","Type":"ContainerStarted","Data":"76f25c68cc6c694e3915654e2605f2509d97f1fb4b82758ecd1500eb306b10c4"} Jan 29 13:20:35 crc kubenswrapper[4787]: I0129 13:20:35.221412 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:35 crc kubenswrapper[4787]: I0129 13:20:35.221472 4787 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:35 crc kubenswrapper[4787]: I0129 13:20:35.221506 4787 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:35 crc kubenswrapper[4787]: I0129 13:20:35.231079 4787 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:35 crc kubenswrapper[4787]: I0129 13:20:35.246784 4787 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c7691a3-b0da-48f8-a49a-38b63841eb75\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c06b863f6ab0cf0b34d1199113e0215dca487b4c97552acb10a007272188f0be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:20:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8961cc6f9299cac4fdfdf585b6c469da603cd56fed3a71e0faa68b6fd79e56cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:20:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b09613e4c9e2b9289e20482c6cf3d275e7fae10a1c3703a5bacd63e8d5a4776e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e391fd0f7410edcd2e4a3cc646ecf11c0d05725fb8c0674dac87f3524f959d8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea920fd7f64d494bbe09daa052fbd0c9e5e021de0193aeffac96a3a0c3e47593\\\",\\\"image\\
\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-29T13:20:29Z\\\"}}}],\\\"phase\\\":\\\"Running\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Pod \"kube-apiserver-crc\" is invalid: metadata.uid: Invalid value: \"7c7691a3-b0da-48f8-a49a-38b63841eb75\": field is immutable" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.013329 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.013397 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.020687 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.025646 4787 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="25c341e3-e82d-4956-8e6b-7d18a6325b74" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.141542 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.141644 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.216770 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.229329 4787 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.229373 4787 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.245805 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.280430 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.379255 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.379335 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:20:36 crc kubenswrapper[4787]: I0129 13:20:36.436971 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:20:37 crc 
kubenswrapper[4787]: I0129 13:20:37.235400 4787 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:37 crc kubenswrapper[4787]: I0129 13:20:37.235435 4787 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:38 crc kubenswrapper[4787]: I0129 13:20:38.243549 4787 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:38 crc kubenswrapper[4787]: I0129 13:20:38.243605 4787 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="7c7691a3-b0da-48f8-a49a-38b63841eb75" Jan 29 13:20:41 crc kubenswrapper[4787]: I0129 13:20:40.308218 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-n8d4v" Jan 29 13:20:41 crc kubenswrapper[4787]: I0129 13:20:41.546570 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 29 13:20:41 crc kubenswrapper[4787]: I0129 13:20:41.563991 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 29 13:20:41 crc kubenswrapper[4787]: I0129 13:20:41.761168 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 29 13:20:42 crc kubenswrapper[4787]: I0129 13:20:42.045400 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 29 13:20:42 crc kubenswrapper[4787]: I0129 13:20:42.049994 4787 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="25c341e3-e82d-4956-8e6b-7d18a6325b74" Jan 29 13:20:42 crc kubenswrapper[4787]: I0129 13:20:42.270669 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 29 13:20:42 crc kubenswrapper[4787]: I0129 13:20:42.295940 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 29 13:20:42 crc kubenswrapper[4787]: I0129 13:20:42.717160 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 29 13:20:42 crc kubenswrapper[4787]: I0129 13:20:42.781216 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 29 13:20:42 crc kubenswrapper[4787]: I0129 13:20:42.797777 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 29 13:20:42 crc kubenswrapper[4787]: I0129 13:20:42.928836 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 29 13:20:43 crc kubenswrapper[4787]: I0129 13:20:43.735814 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.231991 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" 
Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.412924 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.452758 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.459858 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.491333 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.667005 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.695416 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.770861 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 29 13:20:44 crc kubenswrapper[4787]: I0129 13:20:44.932252 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 29 13:20:45 crc kubenswrapper[4787]: I0129 13:20:45.014605 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Jan 29 13:20:45 crc kubenswrapper[4787]: I0129 13:20:45.153332 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 29 13:20:45 crc kubenswrapper[4787]: I0129 13:20:45.231441 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 29 13:20:45 crc kubenswrapper[4787]: I0129 13:20:45.505084 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 29 13:20:45 crc kubenswrapper[4787]: I0129 13:20:45.682199 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 29 13:20:45 crc kubenswrapper[4787]: I0129 13:20:45.767757 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 29 13:20:45 crc kubenswrapper[4787]: I0129 13:20:45.788360 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.007083 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.119424 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.248072 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.318325 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.327177 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.330873 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.488826 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.497302 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.510529 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.736216 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wxvjz"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.759446 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.826141 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.981556 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 29 13:20:46 crc kubenswrapper[4787]: I0129 13:20:46.989540 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.066255 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.089625 4787 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.140209 4787 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.169692 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.234826 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.237112 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.272225 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.483879 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.498726 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.659046 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.690774 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.706918 4787 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.800263 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.809884 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 29 13:20:47 crc kubenswrapper[4787]: I0129 13:20:47.963120 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.123966 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.132889 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.144321 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.227360 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.358020 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.490931 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.523575 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.565582 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.622832 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.626244 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.681122 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.760535 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.813140 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 29 13:20:48 crc kubenswrapper[4787]: I0129 13:20:48.994654 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.053233 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.144904 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.183049 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.247061 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.351243 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.471666 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.472865 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.790213 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.812137 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.860549 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 29 13:20:49 crc kubenswrapper[4787]: I0129 13:20:49.942444 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.083206 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.119036 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.119175 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.225166 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.280212 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.325266 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.375216 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 29 13:20:50 crc kubenswrapper[4787]: I0129
13:20:50.455663 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.615502 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.652026 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.690923 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 29 13:20:50 crc kubenswrapper[4787]: I0129 13:20:50.843122 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.111112 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.117408 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.211889 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.304223 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.362828 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.387073 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.414271 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.446159 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.458283 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.499396 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.548762 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.588836 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.712120 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.733754 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.734213 4787 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.779992 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.910491 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 29 13:20:51 crc kubenswrapper[4787]: I0129 13:20:51.986625 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.002666 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.055120 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.141299 4787 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.148901 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.241097 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.243030 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.313167 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.343820 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.353193 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.359085 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.363528 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.536812 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.730668 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.791063 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.841894 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.865963 4787 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.943550 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 29 13:20:52 crc kubenswrapper[4787]: I0129 13:20:52.984300 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.002543 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.091341 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.098856 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.182405 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.228345 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.305859 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.419914 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.591518 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.683030 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.689652 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.700249 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.770835 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.798771 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.833403 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 29 13:20:53 crc kubenswrapper[4787]: I0129 13:20:53.962851 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.079821 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.134783 4787 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.300511 4787 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.342092 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.350269 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.408608 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.461722 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.471772 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.612835 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.664598 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.822682 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.884808 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.925034 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.959752 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 29 13:20:54 crc kubenswrapper[4787]: I0129 13:20:54.979772 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 29 13:20:55 crc kubenswrapper[4787]: I0129 13:20:55.100421 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 29 13:20:55 crc kubenswrapper[4787]: I0129 13:20:55.206400 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 13:20:55 crc kubenswrapper[4787]: I0129 13:20:55.372114 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 29 13:20:55 crc kubenswrapper[4787]: I0129 13:20:55.390289 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 13:20:55 crc kubenswrapper[4787]: I0129 13:20:55.402591 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 29 13:20:55 crc kubenswrapper[4787]: I0129 13:20:55.494894 4787 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 29 13:20:55 crc kubenswrapper[4787]: I0129 13:20:55.639201 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 29 13:20:56 crc kubenswrapper[4787]: I0129 13:20:56.048248 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 29 13:20:56 crc kubenswrapper[4787]: I0129 13:20:56.104518 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 29 13:20:56 crc kubenswrapper[4787]: I0129 13:20:56.124689 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 29 13:20:56 crc kubenswrapper[4787]: I0129 13:20:56.210692 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 29 13:20:56 crc kubenswrapper[4787]: I0129 13:20:56.471464 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 29 13:20:57 crc kubenswrapper[4787]: I0129 13:20:57.017743 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 29 13:20:57 crc kubenswrapper[4787]: I0129 13:20:57.034525 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 29 13:20:57 crc kubenswrapper[4787]: I0129 13:20:57.051335 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 29 13:20:57 crc kubenswrapper[4787]: I0129 13:20:57.079023 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 29 13:20:57 crc kubenswrapper[4787]: I0129 13:20:57.129511 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 13:20:57 crc kubenswrapper[4787]: I0129 13:20:57.516507 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 13:20:57 crc kubenswrapper[4787]: I0129 13:20:57.854291 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 29 13:20:57 crc kubenswrapper[4787]: I0129 13:20:57.948855 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 29 13:20:59 crc kubenswrapper[4787]: I0129 13:20:59.385936 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7bsl" event={"ID":"5c3abece-22a6-44f4-9b3d-77ad9eed03b7","Type":"ContainerStarted","Data":"3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e"} Jan 29 13:20:59 crc kubenswrapper[4787]: I0129 13:20:59.389820 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp748" event={"ID":"52138722-381d-473d-85ab-f4961a18819c","Type":"ContainerStarted","Data":"62972b3c1510c2313de2aa93c401815995ff412103c3e9fcc1f9aa45b664aeab"} Jan 29 13:21:00 crc kubenswrapper[4787]: I0129 13:21:00.397159 4787 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pww2l" event={"ID":"95ac4597-f6a6-4a47-8892-d5b556c3363e","Type":"ContainerStarted","Data":"cbb19a2bd8809579d61db7fce3fbd04990a2652440cc2f7a735a7a5300f9c3f4"} Jan 29 13:21:01 crc kubenswrapper[4787]: I0129 13:21:01.405674 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzxtm" event={"ID":"df99e00f-9a78-454e-9f8e-5da684f374a1","Type":"ContainerStarted","Data":"2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611"} Jan 29 13:21:01 crc kubenswrapper[4787]: I0129 13:21:01.767009 4787 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 29 13:21:03 crc kubenswrapper[4787]: I0129 13:21:03.848238 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 29 13:21:04 crc kubenswrapper[4787]: I0129 13:21:04.764574 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 29 13:21:05 crc kubenswrapper[4787]: I0129 13:21:05.936758 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pww2l" Jan 29 13:21:05 crc kubenswrapper[4787]: I0129 13:21:05.936890 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pww2l" Jan 29 13:21:06 crc kubenswrapper[4787]: I0129 13:21:06.006098 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pww2l" Jan 29 13:21:06 crc kubenswrapper[4787]: I0129 13:21:06.500286 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pww2l" Jan 29 13:21:06 crc kubenswrapper[4787]: I0129 13:21:06.603267 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 29 13:21:06 crc kubenswrapper[4787]: I0129 13:21:06.616042 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:21:06 crc kubenswrapper[4787]: I0129 13:21:06.616136 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:21:06 crc kubenswrapper[4787]: I0129 13:21:06.666573 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:21:07 crc kubenswrapper[4787]: I0129 13:21:07.262588 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 29 13:21:07 crc kubenswrapper[4787]: I0129 13:21:07.511101 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:21:07 crc kubenswrapper[4787]: I0129 13:21:07.719805 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 29 13:21:08 crc kubenswrapper[4787]: I0129 13:21:08.537921 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:21:08 crc kubenswrapper[4787]: I0129 13:21:08.537998 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:21:08 crc kubenswrapper[4787]: I0129 13:21:08.598538 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:21:09 crc kubenswrapper[4787]: I0129 13:21:09.219137 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 29 13:21:09 crc kubenswrapper[4787]: I0129 13:21:09.350864 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 29 13:21:09 crc kubenswrapper[4787]: I0129 13:21:09.396149 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 29 13:21:09 crc kubenswrapper[4787]: I0129 13:21:09.489900 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 29 13:21:09 crc kubenswrapper[4787]: I0129 13:21:09.525698 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:21:09 crc kubenswrapper[4787]: I0129 13:21:09.577828 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:21:09 crc kubenswrapper[4787]: I0129 13:21:09.577935 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:21:09 crc kubenswrapper[4787]: I0129 13:21:09.626842 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:21:10 crc kubenswrapper[4787]: I0129 13:21:10.154301 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 29 13:21:10 crc kubenswrapper[4787]: I0129 13:21:10.519333 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:21:10 crc kubenswrapper[4787]: I0129 13:21:10.787638 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 29 13:21:11 crc kubenswrapper[4787]: I0129 13:21:11.428724 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 29 13:21:11 crc kubenswrapper[4787]: I0129 13:21:11.541079 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 29 13:21:11 crc kubenswrapper[4787]: I0129 13:21:11.567904 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 29 13:21:12 crc kubenswrapper[4787]: I0129 13:21:12.486727 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87tmc" event={"ID":"d376a31e-47be-4275-a440-5a961fb875d3","Type":"ContainerStarted","Data":"66ec05fbf8058dee6adf21bdd1b253573a3dfd9d9c7cb5d847a6b932dcb83adf"} Jan 29 13:21:12 crc kubenswrapper[4787]: I0129 13:21:12.615376 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 29 13:21:12 crc kubenswrapper[4787]: I0129 13:21:12.761825 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 29 13:21:12 
crc kubenswrapper[4787]: I0129 13:21:12.904161 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 29 13:21:13 crc kubenswrapper[4787]: I0129 13:21:13.175399 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 29 13:21:13 crc kubenswrapper[4787]: I0129 13:21:13.417723 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 29 13:21:13 crc kubenswrapper[4787]: I0129 13:21:13.498980 4787 generic.go:334] "Generic (PLEG): container finished" podID="d376a31e-47be-4275-a440-5a961fb875d3" containerID="66ec05fbf8058dee6adf21bdd1b253573a3dfd9d9c7cb5d847a6b932dcb83adf" exitCode=0 Jan 29 13:21:13 crc kubenswrapper[4787]: I0129 13:21:13.499075 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87tmc" event={"ID":"d376a31e-47be-4275-a440-5a961fb875d3","Type":"ContainerDied","Data":"66ec05fbf8058dee6adf21bdd1b253573a3dfd9d9c7cb5d847a6b932dcb83adf"} Jan 29 13:21:13 crc kubenswrapper[4787]: I0129 13:21:13.804688 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 29 13:21:14 crc kubenswrapper[4787]: I0129 13:21:14.997062 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 29 13:21:15 crc kubenswrapper[4787]: I0129 13:21:15.109668 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 29 13:21:15 crc kubenswrapper[4787]: I0129 13:21:15.206580 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 13:21:16 crc kubenswrapper[4787]: I0129 13:21:16.899982 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 29 13:21:17 crc kubenswrapper[4787]: I0129 13:21:17.162531 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 29 13:21:17 crc kubenswrapper[4787]: I0129 13:21:17.772412 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 29 13:21:17 crc kubenswrapper[4787]: I0129 13:21:17.783805 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 13:21:18 crc kubenswrapper[4787]: I0129 13:21:18.238001 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 29 13:21:18 crc kubenswrapper[4787]: I0129 13:21:18.264112 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 29 13:21:18 crc kubenswrapper[4787]: I0129 13:21:18.342148 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 29 13:21:18 crc kubenswrapper[4787]: I0129 13:21:18.434740 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 29 13:21:18 crc kubenswrapper[4787]: I0129 13:21:18.913296 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 29 13:21:19 
crc kubenswrapper[4787]: I0129 13:21:19.262968 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 29 13:21:19 crc kubenswrapper[4787]: I0129 13:21:19.320976 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 29 13:21:19 crc kubenswrapper[4787]: I0129 13:21:19.321966 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 29 13:21:19 crc kubenswrapper[4787]: I0129 13:21:19.392888 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 29 13:21:20 crc kubenswrapper[4787]: I0129 13:21:20.184698 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 29 13:21:20 crc kubenswrapper[4787]: I0129 13:21:20.582086 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 29 13:21:20 crc kubenswrapper[4787]: I0129 13:21:20.656888 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 29 13:21:20 crc kubenswrapper[4787]: I0129 13:21:20.781152 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 13:21:20 crc kubenswrapper[4787]: I0129 13:21:20.920076 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 29 13:21:21 crc kubenswrapper[4787]: I0129 13:21:21.077382 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 29 13:21:21 crc kubenswrapper[4787]: I0129 13:21:21.164646 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 29 13:21:21 crc kubenswrapper[4787]: I0129 13:21:21.166484 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 29 13:21:21 crc kubenswrapper[4787]: I0129 13:21:21.414978 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 29 13:21:21 crc kubenswrapper[4787]: I0129 13:21:21.645788 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 29 13:21:22 crc kubenswrapper[4787]: I0129 13:21:22.429761 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 29 13:21:22 crc kubenswrapper[4787]: I0129 13:21:22.565007 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87tmc" event={"ID":"d376a31e-47be-4275-a440-5a961fb875d3","Type":"ContainerStarted","Data":"73dcb7c76033f7513ba7114a5cd026fdcb64f5090efa355c843a65a2be7d67d9"} Jan 29 13:21:22 crc kubenswrapper[4787]: I0129 13:21:22.633123 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 29 13:21:22 crc kubenswrapper[4787]: I0129 13:21:22.716615 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 29 13:21:23 crc 
kubenswrapper[4787]: I0129 13:21:23.036589 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 29 13:21:23 crc kubenswrapper[4787]: I0129 13:21:23.706906 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 29 13:21:24 crc kubenswrapper[4787]: I0129 13:21:24.795886 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 29 13:21:25 crc kubenswrapper[4787]: I0129 13:21:25.181391 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 29 13:21:26 crc kubenswrapper[4787]: I0129 13:21:26.757731 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 29 13:21:26 crc kubenswrapper[4787]: I0129 13:21:26.821706 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 13:21:27 crc kubenswrapper[4787]: I0129 13:21:27.079827 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 29 13:21:27 crc kubenswrapper[4787]: I0129 13:21:27.108844 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 29 13:21:27 crc kubenswrapper[4787]: I0129 13:21:27.163538 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.489859 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.562992 4787 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.564012 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n44gv" podStartSLOduration=76.129813709 podStartE2EDuration="2m51.563992226s" podCreationTimestamp="2026-01-29 13:18:37 +0000 UTC" firstStartedPulling="2026-01-29 13:18:39.935297423 +0000 UTC m=+158.696557709" lastFinishedPulling="2026-01-29 13:20:15.36947595 +0000 UTC m=+254.130736226" observedRunningTime="2026-01-29 13:20:31.634645018 +0000 UTC m=+270.395905304" watchObservedRunningTime="2026-01-29 13:21:28.563992226 +0000 UTC m=+327.325252502" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.566018 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j7bsl" podStartSLOduration=36.528568481 podStartE2EDuration="2m50.566007538s" podCreationTimestamp="2026-01-29 13:18:38 +0000 UTC" firstStartedPulling="2026-01-29 13:18:40.961718033 +0000 UTC m=+159.722978299" lastFinishedPulling="2026-01-29 13:20:54.99915708 +0000 UTC m=+293.760417356" observedRunningTime="2026-01-29 13:21:02.433701961 +0000 UTC m=+301.194962237" watchObservedRunningTime="2026-01-29 13:21:28.566007538 +0000 UTC m=+327.327267814" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.566372 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pww2l" podStartSLOduration=47.439501365 
podStartE2EDuration="2m53.566366899s" podCreationTimestamp="2026-01-29 13:18:35 +0000 UTC" firstStartedPulling="2026-01-29 13:18:37.896052157 +0000 UTC m=+156.657312433" lastFinishedPulling="2026-01-29 13:20:44.022917691 +0000 UTC m=+282.784177967" observedRunningTime="2026-01-29 13:21:03.43950645 +0000 UTC m=+302.200766756" watchObservedRunningTime="2026-01-29 13:21:28.566366899 +0000 UTC m=+327.327627175" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.566485 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-87tmc" podStartSLOduration=10.963984756 podStartE2EDuration="2m50.566479812s" podCreationTimestamp="2026-01-29 13:18:38 +0000 UTC" firstStartedPulling="2026-01-29 13:18:40.967591495 +0000 UTC m=+159.728851771" lastFinishedPulling="2026-01-29 13:21:20.570086511 +0000 UTC m=+319.331346827" observedRunningTime="2026-01-29 13:21:22.586514792 +0000 UTC m=+321.347775118" watchObservedRunningTime="2026-01-29 13:21:28.566479812 +0000 UTC m=+327.327740088" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.566709 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fzxtm" podStartSLOduration=34.097356298 podStartE2EDuration="2m49.566703909s" podCreationTimestamp="2026-01-29 13:18:39 +0000 UTC" firstStartedPulling="2026-01-29 13:18:40.958155037 +0000 UTC m=+159.719415323" lastFinishedPulling="2026-01-29 13:20:56.427502658 +0000 UTC m=+295.188762934" observedRunningTime="2026-01-29 13:21:03.456674089 +0000 UTC m=+302.217934365" watchObservedRunningTime="2026-01-29 13:21:28.566703909 +0000 UTC m=+327.327964185" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.568109 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xp748" podStartSLOduration=36.488806922 podStartE2EDuration="2m52.568102432s" podCreationTimestamp="2026-01-29 13:18:36 +0000 UTC" firstStartedPulling="2026-01-29 13:18:38.919868261 +0000 UTC m=+157.681128537" lastFinishedPulling="2026-01-29 13:20:54.999163771 +0000 UTC m=+293.760424047" observedRunningTime="2026-01-29 13:21:01.425578464 +0000 UTC m=+300.186838740" watchObservedRunningTime="2026-01-29 13:21:28.568102432 +0000 UTC m=+327.329362708" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.568212 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wxvjz" podStartSLOduration=60.16254571 podStartE2EDuration="2m53.568208715s" podCreationTimestamp="2026-01-29 13:18:35 +0000 UTC" firstStartedPulling="2026-01-29 13:18:38.916547303 +0000 UTC m=+157.677807589" lastFinishedPulling="2026-01-29 13:20:32.322210308 +0000 UTC m=+271.083470594" observedRunningTime="2026-01-29 13:20:34.234945566 +0000 UTC m=+272.996205842" watchObservedRunningTime="2026-01-29 13:21:28.568208715 +0000 UTC m=+327.329468991" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.568625 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z6slf" podStartSLOduration=64.174183725 podStartE2EDuration="2m53.568620988s" podCreationTimestamp="2026-01-29 13:18:35 +0000 UTC" firstStartedPulling="2026-01-29 13:18:38.919918813 +0000 UTC m=+157.681179089" lastFinishedPulling="2026-01-29 13:20:28.314356076 +0000 UTC m=+267.075616352" observedRunningTime="2026-01-29 13:20:31.868502637 +0000 UTC m=+270.629762913" watchObservedRunningTime="2026-01-29 13:21:28.568620988 
+0000 UTC m=+327.329881264" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.569655 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.569709 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.569739 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx","openshift-controller-manager/controller-manager-8bcdb7fcd-chksx"] Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.570125 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" podUID="8a8076ca-17fd-4373-8891-1ea440b462b6" containerName="controller-manager" containerID="cri-o://b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb" gracePeriod=30 Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.571736 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" podUID="e05f7373-7245-46f6-ae2f-73bac6ebac37" containerName="route-controller-manager" containerID="cri-o://ad7ca122394636ac574a2f689d0e1331922e82de15a5c35389e5f913f28fdc55" gracePeriod=30 Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.597694 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=57.597675592 podStartE2EDuration="57.597675592s" podCreationTimestamp="2026-01-29 13:20:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:21:28.596511566 +0000 UTC m=+327.357771842" watchObservedRunningTime="2026-01-29 13:21:28.597675592 +0000 UTC m=+327.358935868" Jan 29 13:21:28 crc kubenswrapper[4787]: I0129 13:21:28.617849 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=53.617823461 podStartE2EDuration="53.617823461s" podCreationTimestamp="2026-01-29 13:20:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:21:28.617408868 +0000 UTC m=+327.378669144" watchObservedRunningTime="2026-01-29 13:21:28.617823461 +0000 UTC m=+327.379083737" Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.131018 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.131159 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.174358 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.430288 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.623681 4787 generic.go:334] "Generic (PLEG): container finished" 
podID="e05f7373-7245-46f6-ae2f-73bac6ebac37" containerID="ad7ca122394636ac574a2f689d0e1331922e82de15a5c35389e5f913f28fdc55" exitCode=0 Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.623809 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" event={"ID":"e05f7373-7245-46f6-ae2f-73bac6ebac37","Type":"ContainerDied","Data":"ad7ca122394636ac574a2f689d0e1331922e82de15a5c35389e5f913f28fdc55"} Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.680219 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.809594 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 29 13:21:29 crc kubenswrapper[4787]: I0129 13:21:29.947252 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.187929 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.220698 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg"] Jan 29 13:21:30 crc kubenswrapper[4787]: E0129 13:21:30.220955 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e05f7373-7245-46f6-ae2f-73bac6ebac37" containerName="route-controller-manager" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.220969 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e05f7373-7245-46f6-ae2f-73bac6ebac37" containerName="route-controller-manager" Jan 29 13:21:30 crc kubenswrapper[4787]: E0129 13:21:30.220987 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" containerName="installer" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.220993 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" containerName="installer" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.221080 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="e05f7373-7245-46f6-ae2f-73bac6ebac37" containerName="route-controller-manager" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.221092 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="3575cdac-d663-47d0-a7bb-7445da552f35" containerName="installer" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.221844 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.231701 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg"] Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.309671 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.378639 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-config\") pod \"e05f7373-7245-46f6-ae2f-73bac6ebac37\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.378811 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj2tr\" (UniqueName: \"kubernetes.io/projected/e05f7373-7245-46f6-ae2f-73bac6ebac37-kube-api-access-pj2tr\") pod \"e05f7373-7245-46f6-ae2f-73bac6ebac37\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.378906 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e05f7373-7245-46f6-ae2f-73bac6ebac37-serving-cert\") pod \"e05f7373-7245-46f6-ae2f-73bac6ebac37\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.378930 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-client-ca\") pod \"e05f7373-7245-46f6-ae2f-73bac6ebac37\" (UID: \"e05f7373-7245-46f6-ae2f-73bac6ebac37\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.379125 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-client-ca\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.379173 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvb8z\" (UniqueName: \"kubernetes.io/projected/7169578b-e2c5-468b-afc0-12b8ecd79c37-kube-api-access-gvb8z\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.379210 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7169578b-e2c5-468b-afc0-12b8ecd79c37-serving-cert\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.379313 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-config\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.379720 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-client-ca" (OuterVolumeSpecName: 
"client-ca") pod "e05f7373-7245-46f6-ae2f-73bac6ebac37" (UID: "e05f7373-7245-46f6-ae2f-73bac6ebac37"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.379808 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-config" (OuterVolumeSpecName: "config") pod "e05f7373-7245-46f6-ae2f-73bac6ebac37" (UID: "e05f7373-7245-46f6-ae2f-73bac6ebac37"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.385517 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e05f7373-7245-46f6-ae2f-73bac6ebac37-kube-api-access-pj2tr" (OuterVolumeSpecName: "kube-api-access-pj2tr") pod "e05f7373-7245-46f6-ae2f-73bac6ebac37" (UID: "e05f7373-7245-46f6-ae2f-73bac6ebac37"). InnerVolumeSpecName "kube-api-access-pj2tr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.385602 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e05f7373-7245-46f6-ae2f-73bac6ebac37-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e05f7373-7245-46f6-ae2f-73bac6ebac37" (UID: "e05f7373-7245-46f6-ae2f-73bac6ebac37"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.480658 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-config\") pod \"8a8076ca-17fd-4373-8891-1ea440b462b6\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.481274 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-client-ca\") pod \"8a8076ca-17fd-4373-8891-1ea440b462b6\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.481330 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6crs2\" (UniqueName: \"kubernetes.io/projected/8a8076ca-17fd-4373-8891-1ea440b462b6-kube-api-access-6crs2\") pod \"8a8076ca-17fd-4373-8891-1ea440b462b6\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.481386 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a8076ca-17fd-4373-8891-1ea440b462b6-serving-cert\") pod \"8a8076ca-17fd-4373-8891-1ea440b462b6\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.481439 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-proxy-ca-bundles\") pod \"8a8076ca-17fd-4373-8891-1ea440b462b6\" (UID: \"8a8076ca-17fd-4373-8891-1ea440b462b6\") " Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.481661 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvb8z\" (UniqueName: \"kubernetes.io/projected/7169578b-e2c5-468b-afc0-12b8ecd79c37-kube-api-access-gvb8z\") pod 
\"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.481712 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7169578b-e2c5-468b-afc0-12b8ecd79c37-serving-cert\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.481799 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-config\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482085 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-client-ca" (OuterVolumeSpecName: "client-ca") pod "8a8076ca-17fd-4373-8891-1ea440b462b6" (UID: "8a8076ca-17fd-4373-8891-1ea440b462b6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482317 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-config" (OuterVolumeSpecName: "config") pod "8a8076ca-17fd-4373-8891-1ea440b462b6" (UID: "8a8076ca-17fd-4373-8891-1ea440b462b6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482721 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-client-ca\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482885 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482908 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482919 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj2tr\" (UniqueName: \"kubernetes.io/projected/e05f7373-7245-46f6-ae2f-73bac6ebac37-kube-api-access-pj2tr\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482933 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e05f7373-7245-46f6-ae2f-73bac6ebac37-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482943 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e05f7373-7245-46f6-ae2f-73bac6ebac37-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.482957 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.483507 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-config\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.484599 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-client-ca\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.484694 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "8a8076ca-17fd-4373-8891-1ea440b462b6" (UID: "8a8076ca-17fd-4373-8891-1ea440b462b6"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.486484 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a8076ca-17fd-4373-8891-1ea440b462b6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8a8076ca-17fd-4373-8891-1ea440b462b6" (UID: "8a8076ca-17fd-4373-8891-1ea440b462b6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.486846 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a8076ca-17fd-4373-8891-1ea440b462b6-kube-api-access-6crs2" (OuterVolumeSpecName: "kube-api-access-6crs2") pod "8a8076ca-17fd-4373-8891-1ea440b462b6" (UID: "8a8076ca-17fd-4373-8891-1ea440b462b6"). InnerVolumeSpecName "kube-api-access-6crs2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.487226 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7169578b-e2c5-468b-afc0-12b8ecd79c37-serving-cert\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.510879 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvb8z\" (UniqueName: \"kubernetes.io/projected/7169578b-e2c5-468b-afc0-12b8ecd79c37-kube-api-access-gvb8z\") pod \"route-controller-manager-69dc6bf7d8-krwlg\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.552577 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.584572 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6crs2\" (UniqueName: \"kubernetes.io/projected/8a8076ca-17fd-4373-8891-1ea440b462b6-kube-api-access-6crs2\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.584632 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8a8076ca-17fd-4373-8891-1ea440b462b6-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.584653 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8a8076ca-17fd-4373-8891-1ea440b462b6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.631000 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.631041 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx" event={"ID":"e05f7373-7245-46f6-ae2f-73bac6ebac37","Type":"ContainerDied","Data":"db44dce8eb960be685ed89cdc76e6cd360be7e5a8399e4a23045c77c5a2d0efb"} Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.631423 4787 scope.go:117] "RemoveContainer" containerID="ad7ca122394636ac574a2f689d0e1331922e82de15a5c35389e5f913f28fdc55" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.633662 4787 generic.go:334] "Generic (PLEG): container finished" podID="8a8076ca-17fd-4373-8891-1ea440b462b6" containerID="b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb" exitCode=0 Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.633986 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.634545 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" event={"ID":"8a8076ca-17fd-4373-8891-1ea440b462b6","Type":"ContainerDied","Data":"b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb"} Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.634734 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8bcdb7fcd-chksx" event={"ID":"8a8076ca-17fd-4373-8891-1ea440b462b6","Type":"ContainerDied","Data":"4378221d84b50d75ed45f693fece056ed53e52d2b7249ab786cecc5d6e8d457b"} Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.651901 4787 scope.go:117] "RemoveContainer" containerID="b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.673250 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx"] Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.678213 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b64bd8c5d-szccx"] Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.691616 4787 scope.go:117] "RemoveContainer" containerID="b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb" Jan 29 13:21:30 crc kubenswrapper[4787]: E0129 13:21:30.692622 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb\": container with ID starting with b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb not found: ID does not exist" containerID="b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb" Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.692662 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb"} err="failed to get container status \"b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb\": rpc error: code = NotFound desc = could not find container \"b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb\": container with ID starting with 
b78d3a75fb5f209bfd0b5cb71346677352c747d3fb3ac13f5d2aa870a7486beb not found: ID does not exist"
Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.697069 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8bcdb7fcd-chksx"]
Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.699544 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-8bcdb7fcd-chksx"]
Jan 29 13:21:30 crc kubenswrapper[4787]: I0129 13:21:30.866673 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg"]
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.019008 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.135825 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.164410 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.168344 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.268059 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.645038 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" event={"ID":"7169578b-e2c5-468b-afc0-12b8ecd79c37","Type":"ContainerStarted","Data":"9a1fc0217506608ac242df9c067e42e981e743feee55ec5b64f7fdb9d108bb1d"}
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.645099 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" event={"ID":"7169578b-e2c5-468b-afc0-12b8ecd79c37","Type":"ContainerStarted","Data":"0ae4027e102a35ce8fee121cc4f036ed4dda7364afb7541c2f96d7c05ebe6a96"}
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.993707 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a8076ca-17fd-4373-8891-1ea440b462b6" path="/var/lib/kubelet/pods/8a8076ca-17fd-4373-8891-1ea440b462b6/volumes"
Jan 29 13:21:31 crc kubenswrapper[4787]: I0129 13:21:31.994268 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e05f7373-7245-46f6-ae2f-73bac6ebac37" path="/var/lib/kubelet/pods/e05f7373-7245-46f6-ae2f-73bac6ebac37/volumes"
Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.078715 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.555094 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.609507 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-54569b9b64-gxlm2"]
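
The paired "RemoveContainer" / "ContainerStatus from runtime service failed" entries above record a benign race: container b78d3a75... was already removed once, so the second deletion attempt gets a gRPC NotFound from the runtime, which the kubelet logs and then drops. A minimal sketch of handling such a removal idempotently against a CRI-style runtime; the runtimeService interface and helper name are illustrative, not the kubelet's actual types:

package kubeletsketch

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// runtimeService is a stand-in for the CRI runtime client; only the call
// exercised by the log excerpt is modelled here.
type runtimeService interface {
	RemoveContainer(ctx context.Context, id string) error
}

// removeContainerIdempotent treats gRPC NotFound as success: if another
// deletion path already removed the container (the race logged above as
// "DeleteContainer returned error"), there is nothing left to do.
func removeContainerIdempotent(ctx context.Context, rs runtimeService, id string) error {
	err := rs.RemoveContainer(ctx, id)
	if status.Code(err) == codes.NotFound {
		fmt.Printf("container %s already gone, ignoring\n", id)
		return nil // idempotent: absence is the desired end state
	}
	return err // nil on success, real errors propagate
}
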
podUID="8a8076ca-17fd-4373-8891-1ea440b462b6" containerName="controller-manager" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.609827 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a8076ca-17fd-4373-8891-1ea440b462b6" containerName="controller-manager" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.609968 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a8076ca-17fd-4373-8891-1ea440b462b6" containerName="controller-manager" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.610440 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.613307 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.613684 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.613868 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.614345 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.614758 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.615025 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.627200 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.631880 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-54569b9b64-gxlm2"] Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.658234 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.666524 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.677303 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" podStartSLOduration=4.677282136 podStartE2EDuration="4.677282136s" podCreationTimestamp="2026-01-29 13:21:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:21:32.674001765 +0000 UTC m=+331.435262031" watchObservedRunningTime="2026-01-29 13:21:32.677282136 +0000 UTC m=+331.438542412" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.717241 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-client-ca\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: 
\"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.717388 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-config\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.717427 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcffh\" (UniqueName: \"kubernetes.io/projected/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-kube-api-access-mcffh\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.717488 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-serving-cert\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.717551 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-proxy-ca-bundles\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.819633 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-proxy-ca-bundles\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.819738 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-client-ca\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.819872 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-config\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.819951 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcffh\" (UniqueName: \"kubernetes.io/projected/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-kube-api-access-mcffh\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" 
Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.820024 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-serving-cert\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.822401 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-client-ca\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.822612 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-config\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.822943 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-proxy-ca-bundles\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.830727 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-serving-cert\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.838906 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcffh\" (UniqueName: \"kubernetes.io/projected/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-kube-api-access-mcffh\") pod \"controller-manager-54569b9b64-gxlm2\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:32 crc kubenswrapper[4787]: I0129 13:21:32.940516 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:33 crc kubenswrapper[4787]: I0129 13:21:33.372930 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-54569b9b64-gxlm2"] Jan 29 13:21:33 crc kubenswrapper[4787]: I0129 13:21:33.671313 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" event={"ID":"23a8f51c-eb4d-4647-bdbd-6d868bf86f35","Type":"ContainerStarted","Data":"c8f14ceb11fed65954c86ffbc6ba528d0fe20b218583d215ec244e16fd32b654"} Jan 29 13:21:34 crc kubenswrapper[4787]: I0129 13:21:34.680180 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" event={"ID":"23a8f51c-eb4d-4647-bdbd-6d868bf86f35","Type":"ContainerStarted","Data":"c53071629ecf2eaf9378e61477d1a9e0a78b3e498c605cb773e5f9ed1841bcf6"} Jan 29 13:21:34 crc kubenswrapper[4787]: I0129 13:21:34.680981 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:34 crc kubenswrapper[4787]: I0129 13:21:34.689925 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:34 crc kubenswrapper[4787]: I0129 13:21:34.727171 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" podStartSLOduration=6.727141591 podStartE2EDuration="6.727141591s" podCreationTimestamp="2026-01-29 13:21:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:21:34.704712512 +0000 UTC m=+333.465972788" watchObservedRunningTime="2026-01-29 13:21:34.727141591 +0000 UTC m=+333.488401867" Jan 29 13:21:35 crc kubenswrapper[4787]: I0129 13:21:35.039671 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 29 13:21:36 crc kubenswrapper[4787]: I0129 13:21:36.346153 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 13:21:37 crc kubenswrapper[4787]: I0129 13:21:37.532789 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 29 13:21:38 crc kubenswrapper[4787]: I0129 13:21:38.244930 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 29 13:21:40 crc kubenswrapper[4787]: I0129 13:21:40.760330 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 29 13:21:42 crc kubenswrapper[4787]: I0129 13:21:42.304951 4787 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 13:21:42 crc kubenswrapper[4787]: I0129 13:21:42.305734 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://f6550a6b39a1dd7439664de0cbb81b01b81f30ad40cfda49d8efe775f0cf1416" gracePeriod=5 Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 
Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 13:21:47.508611 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wxvjz"]
Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 13:21:47.509040 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wxvjz" podUID="2236dfad-b7be-4375-9661-287dbeeec969" containerName="registry-server" containerID="cri-o://76f25c68cc6c694e3915654e2605f2509d97f1fb4b82758ecd1500eb306b10c4" gracePeriod=2
Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 13:21:47.707854 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xp748"]
Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 13:21:47.708494 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xp748" podUID="52138722-381d-473d-85ab-f4961a18819c" containerName="registry-server" containerID="cri-o://62972b3c1510c2313de2aa93c401815995ff412103c3e9fcc1f9aa45b664aeab" gracePeriod=2
Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 13:21:47.765364 4787 generic.go:334] "Generic (PLEG): container finished" podID="2236dfad-b7be-4375-9661-287dbeeec969" containerID="76f25c68cc6c694e3915654e2605f2509d97f1fb4b82758ecd1500eb306b10c4" exitCode=0
Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 13:21:47.765422 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxvjz" event={"ID":"2236dfad-b7be-4375-9661-287dbeeec969","Type":"ContainerDied","Data":"76f25c68cc6c694e3915654e2605f2509d97f1fb4b82758ecd1500eb306b10c4"}
Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 13:21:47.766984 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 29 13:21:47 crc kubenswrapper[4787]: I0129 13:21:47.767024 4787 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="f6550a6b39a1dd7439664de0cbb81b01b81f30ad40cfda49d8efe775f0cf1416" exitCode=137
Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.233379 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-54569b9b64-gxlm2"]
Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.234265 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" podUID="23a8f51c-eb4d-4647-bdbd-6d868bf86f35" containerName="controller-manager" containerID="cri-o://c53071629ecf2eaf9378e61477d1a9e0a78b3e498c605cb773e5f9ed1841bcf6" gracePeriod=30
Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.249376 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.249510 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.259973 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.260020 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.260088 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.260122 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.260157 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.260406 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.260407 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.260513 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.260564 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.261002 4787 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.261036 4787 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.261049 4787 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.261060 4787 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.272812 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.336101 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg"] Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.336401 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" podUID="7169578b-e2c5-468b-afc0-12b8ecd79c37" containerName="route-controller-manager" containerID="cri-o://9a1fc0217506608ac242df9c067e42e981e743feee55ec5b64f7fdb9d108bb1d" gracePeriod=30 Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.381171 4787 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.620494 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.775544 4787 generic.go:334] "Generic (PLEG): container finished" podID="23a8f51c-eb4d-4647-bdbd-6d868bf86f35" containerID="c53071629ecf2eaf9378e61477d1a9e0a78b3e498c605cb773e5f9ed1841bcf6" exitCode=0 Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.775621 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" event={"ID":"23a8f51c-eb4d-4647-bdbd-6d868bf86f35","Type":"ContainerDied","Data":"c53071629ecf2eaf9378e61477d1a9e0a78b3e498c605cb773e5f9ed1841bcf6"} Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.779183 4787 generic.go:334] "Generic (PLEG): container finished" podID="7169578b-e2c5-468b-afc0-12b8ecd79c37" containerID="9a1fc0217506608ac242df9c067e42e981e743feee55ec5b64f7fdb9d108bb1d" exitCode=0 Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.779282 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" event={"ID":"7169578b-e2c5-468b-afc0-12b8ecd79c37","Type":"ContainerDied","Data":"9a1fc0217506608ac242df9c067e42e981e743feee55ec5b64f7fdb9d108bb1d"} Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.784984 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-catalog-content\") pod \"2236dfad-b7be-4375-9661-287dbeeec969\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.785192 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgdjb\" (UniqueName: \"kubernetes.io/projected/2236dfad-b7be-4375-9661-287dbeeec969-kube-api-access-sgdjb\") pod \"2236dfad-b7be-4375-9661-287dbeeec969\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.785238 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-utilities\") pod \"2236dfad-b7be-4375-9661-287dbeeec969\" (UID: \"2236dfad-b7be-4375-9661-287dbeeec969\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.785837 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.786001 4787 scope.go:117] "RemoveContainer" containerID="f6550a6b39a1dd7439664de0cbb81b01b81f30ad40cfda49d8efe775f0cf1416" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.786148 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.786675 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-utilities" (OuterVolumeSpecName: "utilities") pod "2236dfad-b7be-4375-9661-287dbeeec969" (UID: "2236dfad-b7be-4375-9661-287dbeeec969"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.793383 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2236dfad-b7be-4375-9661-287dbeeec969-kube-api-access-sgdjb" (OuterVolumeSpecName: "kube-api-access-sgdjb") pod "2236dfad-b7be-4375-9661-287dbeeec969" (UID: "2236dfad-b7be-4375-9661-287dbeeec969"). InnerVolumeSpecName "kube-api-access-sgdjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.801319 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wxvjz" event={"ID":"2236dfad-b7be-4375-9661-287dbeeec969","Type":"ContainerDied","Data":"38606333fdc4c6888616addf0cdb0f0024c8e95c3289b7c26ba4387ad6735f6e"} Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.801487 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wxvjz" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.804652 4787 generic.go:334] "Generic (PLEG): container finished" podID="52138722-381d-473d-85ab-f4961a18819c" containerID="62972b3c1510c2313de2aa93c401815995ff412103c3e9fcc1f9aa45b664aeab" exitCode=0 Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.804728 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp748" event={"ID":"52138722-381d-473d-85ab-f4961a18819c","Type":"ContainerDied","Data":"62972b3c1510c2313de2aa93c401815995ff412103c3e9fcc1f9aa45b664aeab"} Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.816729 4787 scope.go:117] "RemoveContainer" containerID="76f25c68cc6c694e3915654e2605f2509d97f1fb4b82758ecd1500eb306b10c4" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.831685 4787 scope.go:117] "RemoveContainer" containerID="8250f58a4cc6bcae572f251756c9d2615dd1191813b96fd258e3f9e127fd3220" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.836956 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.853806 4787 scope.go:117] "RemoveContainer" containerID="b54618db470368026ad0b49856b86067e7bc6dcec9f86f923837605b27ddd844" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.855340 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2236dfad-b7be-4375-9661-287dbeeec969" (UID: "2236dfad-b7be-4375-9661-287dbeeec969"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.887374 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgdjb\" (UniqueName: \"kubernetes.io/projected/2236dfad-b7be-4375-9661-287dbeeec969-kube-api-access-sgdjb\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.887413 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.887427 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2236dfad-b7be-4375-9661-287dbeeec969-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.982565 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.988466 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-serving-cert\") pod \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.988652 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-catalog-content\") pod \"52138722-381d-473d-85ab-f4961a18819c\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.988827 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcffh\" (UniqueName: \"kubernetes.io/projected/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-kube-api-access-mcffh\") pod \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.988937 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-proxy-ca-bundles\") pod \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.989127 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-utilities\") pod \"52138722-381d-473d-85ab-f4961a18819c\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.989248 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hsqb5\" (UniqueName: \"kubernetes.io/projected/52138722-381d-473d-85ab-f4961a18819c-kube-api-access-hsqb5\") pod \"52138722-381d-473d-85ab-f4961a18819c\" (UID: \"52138722-381d-473d-85ab-f4961a18819c\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.989369 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-config\") pod \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\" (UID: 
\"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.989517 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-client-ca\") pod \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\" (UID: \"23a8f51c-eb4d-4647-bdbd-6d868bf86f35\") " Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.989967 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-utilities" (OuterVolumeSpecName: "utilities") pod "52138722-381d-473d-85ab-f4961a18819c" (UID: "52138722-381d-473d-85ab-f4961a18819c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.990051 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-client-ca" (OuterVolumeSpecName: "client-ca") pod "23a8f51c-eb4d-4647-bdbd-6d868bf86f35" (UID: "23a8f51c-eb4d-4647-bdbd-6d868bf86f35"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.990219 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-config" (OuterVolumeSpecName: "config") pod "23a8f51c-eb4d-4647-bdbd-6d868bf86f35" (UID: "23a8f51c-eb4d-4647-bdbd-6d868bf86f35"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.990279 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "23a8f51c-eb4d-4647-bdbd-6d868bf86f35" (UID: "23a8f51c-eb4d-4647-bdbd-6d868bf86f35"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.993498 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-kube-api-access-mcffh" (OuterVolumeSpecName: "kube-api-access-mcffh") pod "23a8f51c-eb4d-4647-bdbd-6d868bf86f35" (UID: "23a8f51c-eb4d-4647-bdbd-6d868bf86f35"). InnerVolumeSpecName "kube-api-access-mcffh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.993606 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "23a8f51c-eb4d-4647-bdbd-6d868bf86f35" (UID: "23a8f51c-eb4d-4647-bdbd-6d868bf86f35"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:21:48 crc kubenswrapper[4787]: I0129 13:21:48.995602 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52138722-381d-473d-85ab-f4961a18819c-kube-api-access-hsqb5" (OuterVolumeSpecName: "kube-api-access-hsqb5") pod "52138722-381d-473d-85ab-f4961a18819c" (UID: "52138722-381d-473d-85ab-f4961a18819c"). InnerVolumeSpecName "kube-api-access-hsqb5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.045371 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52138722-381d-473d-85ab-f4961a18819c" (UID: "52138722-381d-473d-85ab-f4961a18819c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.090535 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.090599 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcffh\" (UniqueName: \"kubernetes.io/projected/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-kube-api-access-mcffh\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.090615 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.090626 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52138722-381d-473d-85ab-f4961a18819c-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.090637 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hsqb5\" (UniqueName: \"kubernetes.io/projected/52138722-381d-473d-85ab-f4961a18819c-kube-api-access-hsqb5\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.090649 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.090661 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.090672 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23a8f51c-eb4d-4647-bdbd-6d868bf86f35-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.137348 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wxvjz"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.145695 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wxvjz"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.305531 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.494695 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7169578b-e2c5-468b-afc0-12b8ecd79c37-serving-cert\") pod \"7169578b-e2c5-468b-afc0-12b8ecd79c37\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.494803 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-config\") pod \"7169578b-e2c5-468b-afc0-12b8ecd79c37\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.494855 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvb8z\" (UniqueName: \"kubernetes.io/projected/7169578b-e2c5-468b-afc0-12b8ecd79c37-kube-api-access-gvb8z\") pod \"7169578b-e2c5-468b-afc0-12b8ecd79c37\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.494910 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-client-ca\") pod \"7169578b-e2c5-468b-afc0-12b8ecd79c37\" (UID: \"7169578b-e2c5-468b-afc0-12b8ecd79c37\") " Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.495921 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-client-ca" (OuterVolumeSpecName: "client-ca") pod "7169578b-e2c5-468b-afc0-12b8ecd79c37" (UID: "7169578b-e2c5-468b-afc0-12b8ecd79c37"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.497145 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-config" (OuterVolumeSpecName: "config") pod "7169578b-e2c5-468b-afc0-12b8ecd79c37" (UID: "7169578b-e2c5-468b-afc0-12b8ecd79c37"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.499439 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7169578b-e2c5-468b-afc0-12b8ecd79c37-kube-api-access-gvb8z" (OuterVolumeSpecName: "kube-api-access-gvb8z") pod "7169578b-e2c5-468b-afc0-12b8ecd79c37" (UID: "7169578b-e2c5-468b-afc0-12b8ecd79c37"). InnerVolumeSpecName "kube-api-access-gvb8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.500073 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7169578b-e2c5-468b-afc0-12b8ecd79c37-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7169578b-e2c5-468b-afc0-12b8ecd79c37" (UID: "7169578b-e2c5-468b-afc0-12b8ecd79c37"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.596695 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7169578b-e2c5-468b-afc0-12b8ecd79c37-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.596765 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.596792 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvb8z\" (UniqueName: \"kubernetes.io/projected/7169578b-e2c5-468b-afc0-12b8ecd79c37-kube-api-access-gvb8z\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.596820 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7169578b-e2c5-468b-afc0-12b8ecd79c37-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.622540 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8676955c6b-92dhq"] Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623304 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2236dfad-b7be-4375-9661-287dbeeec969" containerName="extract-utilities" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.623367 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2236dfad-b7be-4375-9661-287dbeeec969" containerName="extract-utilities" Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623397 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52138722-381d-473d-85ab-f4961a18819c" containerName="extract-utilities" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.623410 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="52138722-381d-473d-85ab-f4961a18819c" containerName="extract-utilities" Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623504 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7169578b-e2c5-468b-afc0-12b8ecd79c37" containerName="route-controller-manager" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.623518 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7169578b-e2c5-468b-afc0-12b8ecd79c37" containerName="route-controller-manager" Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623548 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.623599 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623614 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23a8f51c-eb4d-4647-bdbd-6d868bf86f35" containerName="controller-manager" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.623626 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="23a8f51c-eb4d-4647-bdbd-6d868bf86f35" containerName="controller-manager" Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623643 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52138722-381d-473d-85ab-f4961a18819c" containerName="extract-content" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 
13:21:49.623691 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="52138722-381d-473d-85ab-f4961a18819c" containerName="extract-content" Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623713 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52138722-381d-473d-85ab-f4961a18819c" containerName="registry-server" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.623725 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="52138722-381d-473d-85ab-f4961a18819c" containerName="registry-server" Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623770 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2236dfad-b7be-4375-9661-287dbeeec969" containerName="extract-content" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.623784 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2236dfad-b7be-4375-9661-287dbeeec969" containerName="extract-content" Jan 29 13:21:49 crc kubenswrapper[4787]: E0129 13:21:49.623800 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2236dfad-b7be-4375-9661-287dbeeec969" containerName="registry-server" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.623812 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2236dfad-b7be-4375-9661-287dbeeec969" containerName="registry-server" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.624213 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="23a8f51c-eb4d-4647-bdbd-6d868bf86f35" containerName="controller-manager" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.624244 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.624305 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="52138722-381d-473d-85ab-f4961a18819c" containerName="registry-server" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.624327 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7169578b-e2c5-468b-afc0-12b8ecd79c37" containerName="route-controller-manager" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.624343 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="2236dfad-b7be-4375-9661-287dbeeec969" containerName="registry-server" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.625242 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.626282 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.627365 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.635318 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.644015 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8676955c6b-92dhq"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698263 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-client-ca\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698323 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-proxy-ca-bundles\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698353 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m472w\" (UniqueName: \"kubernetes.io/projected/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-kube-api-access-m472w\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698392 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-serving-cert\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698443 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhl5d\" (UniqueName: \"kubernetes.io/projected/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-kube-api-access-rhl5d\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698503 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-serving-cert\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698535 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-client-ca\") pod \"controller-manager-8676955c6b-92dhq\" (UID: 
\"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698559 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-config\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.698619 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-config\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.799978 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-client-ca\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.800066 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-proxy-ca-bundles\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.800111 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m472w\" (UniqueName: \"kubernetes.io/projected/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-kube-api-access-m472w\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.800169 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-serving-cert\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.800379 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhl5d\" (UniqueName: \"kubernetes.io/projected/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-kube-api-access-rhl5d\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.800428 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-serving-cert\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " 
pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.800492 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-client-ca\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.800525 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-config\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.800574 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-config\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.801692 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-client-ca\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.801811 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-client-ca\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.802410 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-config\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.803204 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-config\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.804028 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-proxy-ca-bundles\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.806727 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-serving-cert\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.815217 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-serving-cert\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.817229 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m472w\" (UniqueName: \"kubernetes.io/projected/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-kube-api-access-m472w\") pod \"controller-manager-8676955c6b-92dhq\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.817277 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp748" event={"ID":"52138722-381d-473d-85ab-f4961a18819c","Type":"ContainerDied","Data":"e1f67774061c3891dd5207465f0d66439d8bb120b4147a2cebf554a1bcaaf2fc"} Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.817512 4787 scope.go:117] "RemoveContainer" containerID="62972b3c1510c2313de2aa93c401815995ff412103c3e9fcc1f9aa45b664aeab" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.817305 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp748" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.819268 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" event={"ID":"23a8f51c-eb4d-4647-bdbd-6d868bf86f35","Type":"ContainerDied","Data":"c8f14ceb11fed65954c86ffbc6ba528d0fe20b218583d215ec244e16fd32b654"} Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.819326 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-54569b9b64-gxlm2" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.823488 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" event={"ID":"7169578b-e2c5-468b-afc0-12b8ecd79c37","Type":"ContainerDied","Data":"0ae4027e102a35ce8fee121cc4f036ed4dda7364afb7541c2f96d7c05ebe6a96"} Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.823642 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.833058 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhl5d\" (UniqueName: \"kubernetes.io/projected/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-kube-api-access-rhl5d\") pod \"route-controller-manager-54f9489466-vrz6t\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.853881 4787 scope.go:117] "RemoveContainer" containerID="51bc985f987d9fb5e30f8747bd319582deffda8c49afb8095b2f6b0b37abcf3d" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.857351 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-54569b9b64-gxlm2"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.866608 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-54569b9b64-gxlm2"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.871448 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xp748"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.874212 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xp748"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.883727 4787 scope.go:117] "RemoveContainer" containerID="54822d283b11626e3eaccceb5d490f6a52e936d1f32faaea0fed45b527abcfef" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.885739 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.894296 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69dc6bf7d8-krwlg"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.898583 4787 scope.go:117] "RemoveContainer" containerID="c53071629ecf2eaf9378e61477d1a9e0a78b3e498c605cb773e5f9ed1841bcf6" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.906991 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7bsl"] Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.907309 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j7bsl" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerName="registry-server" containerID="cri-o://3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e" gracePeriod=2 Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.918762 4787 scope.go:117] "RemoveContainer" containerID="9a1fc0217506608ac242df9c067e42e981e743feee55ec5b64f7fdb9d108bb1d" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.965835 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:49 crc kubenswrapper[4787]: I0129 13:21:49.979386 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.002087 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2236dfad-b7be-4375-9661-287dbeeec969" path="/var/lib/kubelet/pods/2236dfad-b7be-4375-9661-287dbeeec969/volumes" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.005045 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23a8f51c-eb4d-4647-bdbd-6d868bf86f35" path="/var/lib/kubelet/pods/23a8f51c-eb4d-4647-bdbd-6d868bf86f35/volumes" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.006629 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52138722-381d-473d-85ab-f4961a18819c" path="/var/lib/kubelet/pods/52138722-381d-473d-85ab-f4961a18819c/volumes" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.009443 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7169578b-e2c5-468b-afc0-12b8ecd79c37" path="/var/lib/kubelet/pods/7169578b-e2c5-468b-afc0-12b8ecd79c37/volumes" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.010175 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.010543 4787 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.031493 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.031547 4787 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="5756fcd5-0ed9-4818-abd1-c37ef1574d9b" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.039108 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.039144 4787 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="5756fcd5-0ed9-4818-abd1-c37ef1574d9b" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.110232 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fzxtm"] Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.110599 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fzxtm" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerName="registry-server" containerID="cri-o://2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611" gracePeriod=2 Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.349565 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.414972 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8676955c6b-92dhq"] Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.511296 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlwvh\" (UniqueName: \"kubernetes.io/projected/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-kube-api-access-vlwvh\") pod \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.511439 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-utilities\") pod \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.511511 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-catalog-content\") pod \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\" (UID: \"5c3abece-22a6-44f4-9b3d-77ad9eed03b7\") " Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.512596 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-utilities" (OuterVolumeSpecName: "utilities") pod "5c3abece-22a6-44f4-9b3d-77ad9eed03b7" (UID: "5c3abece-22a6-44f4-9b3d-77ad9eed03b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.523847 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-kube-api-access-vlwvh" (OuterVolumeSpecName: "kube-api-access-vlwvh") pod "5c3abece-22a6-44f4-9b3d-77ad9eed03b7" (UID: "5c3abece-22a6-44f4-9b3d-77ad9eed03b7"). InnerVolumeSpecName "kube-api-access-vlwvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.531136 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t"] Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.541138 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c3abece-22a6-44f4-9b3d-77ad9eed03b7" (UID: "5c3abece-22a6-44f4-9b3d-77ad9eed03b7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.579826 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.612729 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlwvh\" (UniqueName: \"kubernetes.io/projected/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-kube-api-access-vlwvh\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.612771 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.612781 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c3abece-22a6-44f4-9b3d-77ad9eed03b7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.713825 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-catalog-content\") pod \"df99e00f-9a78-454e-9f8e-5da684f374a1\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.713901 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqvsf\" (UniqueName: \"kubernetes.io/projected/df99e00f-9a78-454e-9f8e-5da684f374a1-kube-api-access-lqvsf\") pod \"df99e00f-9a78-454e-9f8e-5da684f374a1\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.713991 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-utilities\") pod \"df99e00f-9a78-454e-9f8e-5da684f374a1\" (UID: \"df99e00f-9a78-454e-9f8e-5da684f374a1\") " Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.715021 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-utilities" (OuterVolumeSpecName: "utilities") pod "df99e00f-9a78-454e-9f8e-5da684f374a1" (UID: "df99e00f-9a78-454e-9f8e-5da684f374a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.719532 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df99e00f-9a78-454e-9f8e-5da684f374a1-kube-api-access-lqvsf" (OuterVolumeSpecName: "kube-api-access-lqvsf") pod "df99e00f-9a78-454e-9f8e-5da684f374a1" (UID: "df99e00f-9a78-454e-9f8e-5da684f374a1"). InnerVolumeSpecName "kube-api-access-lqvsf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.816012 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.816055 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqvsf\" (UniqueName: \"kubernetes.io/projected/df99e00f-9a78-454e-9f8e-5da684f374a1-kube-api-access-lqvsf\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.831576 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" event={"ID":"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68","Type":"ContainerStarted","Data":"92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c"} Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.831637 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" event={"ID":"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68","Type":"ContainerStarted","Data":"b62c7ef5b8131432c268b598682c40792cba0db0c7b0664ef94d7647b3b5a04c"} Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.832082 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.834276 4787 generic.go:334] "Generic (PLEG): container finished" podID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerID="2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611" exitCode=0 Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.834335 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzxtm" event={"ID":"df99e00f-9a78-454e-9f8e-5da684f374a1","Type":"ContainerDied","Data":"2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611"} Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.834358 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzxtm" event={"ID":"df99e00f-9a78-454e-9f8e-5da684f374a1","Type":"ContainerDied","Data":"9017cfe0fb14b91c0fd5cb9a243bacb6145cca29d5a12a0e5e54678d1e13a523"} Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.834381 4787 scope.go:117] "RemoveContainer" containerID="2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.834523 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fzxtm" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.838327 4787 generic.go:334] "Generic (PLEG): container finished" podID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerID="3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e" exitCode=0 Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.838384 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7bsl" event={"ID":"5c3abece-22a6-44f4-9b3d-77ad9eed03b7","Type":"ContainerDied","Data":"3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e"} Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.838406 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j7bsl" event={"ID":"5c3abece-22a6-44f4-9b3d-77ad9eed03b7","Type":"ContainerDied","Data":"aaac0045a3b347295814ced7f38172234f51a9e88194a2aed65a71aa7c72abd3"} Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.838497 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j7bsl" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.839584 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.842536 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" event={"ID":"d98b30cd-c5bf-48e9-880f-e67bd60a2c51","Type":"ContainerStarted","Data":"4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660"} Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.842574 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" event={"ID":"d98b30cd-c5bf-48e9-880f-e67bd60a2c51","Type":"ContainerStarted","Data":"cb6e59a30242ab54857a8776d5ed02a2951d636951504dd9fb4d258cb8125aae"} Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.843423 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.847765 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" podStartSLOduration=2.847742824 podStartE2EDuration="2.847742824s" podCreationTimestamp="2026-01-29 13:21:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:21:50.846648731 +0000 UTC m=+349.607909007" watchObservedRunningTime="2026-01-29 13:21:50.847742824 +0000 UTC m=+349.609003100" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.859704 4787 scope.go:117] "RemoveContainer" containerID="8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.882626 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" podStartSLOduration=2.882600306 podStartE2EDuration="2.882600306s" podCreationTimestamp="2026-01-29 13:21:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 
13:21:50.880342927 +0000 UTC m=+349.641603203" watchObservedRunningTime="2026-01-29 13:21:50.882600306 +0000 UTC m=+349.643860582" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.887400 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df99e00f-9a78-454e-9f8e-5da684f374a1" (UID: "df99e00f-9a78-454e-9f8e-5da684f374a1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.898401 4787 scope.go:117] "RemoveContainer" containerID="cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.918837 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df99e00f-9a78-454e-9f8e-5da684f374a1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.931094 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7bsl"] Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.931155 4787 scope.go:117] "RemoveContainer" containerID="2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.935166 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j7bsl"] Jan 29 13:21:50 crc kubenswrapper[4787]: E0129 13:21:50.936978 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611\": container with ID starting with 2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611 not found: ID does not exist" containerID="2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.937038 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611"} err="failed to get container status \"2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611\": rpc error: code = NotFound desc = could not find container \"2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611\": container with ID starting with 2969d7a6cedc839a32c0d2da05212c42641cd554b5d1f31566603cec4d4e4611 not found: ID does not exist" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.937071 4787 scope.go:117] "RemoveContainer" containerID="8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1" Jan 29 13:21:50 crc kubenswrapper[4787]: E0129 13:21:50.937679 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1\": container with ID starting with 8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1 not found: ID does not exist" containerID="8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.937731 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1"} err="failed to get container status 
\"8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1\": rpc error: code = NotFound desc = could not find container \"8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1\": container with ID starting with 8368a8107011e78bb0c8aab72089f7eb183c1e92df1788999da7b5d2dbc714c1 not found: ID does not exist" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.937771 4787 scope.go:117] "RemoveContainer" containerID="cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c" Jan 29 13:21:50 crc kubenswrapper[4787]: E0129 13:21:50.938259 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c\": container with ID starting with cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c not found: ID does not exist" containerID="cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.938309 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c"} err="failed to get container status \"cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c\": rpc error: code = NotFound desc = could not find container \"cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c\": container with ID starting with cdaca46de4004471e91fa499ffbe486cb85230e58f636d4e44808c05d439566c not found: ID does not exist" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.938327 4787 scope.go:117] "RemoveContainer" containerID="3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.966064 4787 scope.go:117] "RemoveContainer" containerID="46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080" Jan 29 13:21:50 crc kubenswrapper[4787]: I0129 13:21:50.990784 4787 scope.go:117] "RemoveContainer" containerID="45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.006972 4787 scope.go:117] "RemoveContainer" containerID="3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e" Jan 29 13:21:51 crc kubenswrapper[4787]: E0129 13:21:51.007564 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e\": container with ID starting with 3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e not found: ID does not exist" containerID="3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.007632 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e"} err="failed to get container status \"3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e\": rpc error: code = NotFound desc = could not find container \"3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e\": container with ID starting with 3601a5158e3a8107202843c9beb8021ba5bf6cd5f904d11c23ba11317684a85e not found: ID does not exist" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.007680 4787 scope.go:117] "RemoveContainer" containerID="46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080" Jan 29 13:21:51 crc 
kubenswrapper[4787]: E0129 13:21:51.008716 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080\": container with ID starting with 46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080 not found: ID does not exist" containerID="46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.008755 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080"} err="failed to get container status \"46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080\": rpc error: code = NotFound desc = could not find container \"46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080\": container with ID starting with 46097b7735a579c464b75176f48e98a6ea430042027f1c52c8ae5a700b2bb080 not found: ID does not exist" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.008784 4787 scope.go:117] "RemoveContainer" containerID="45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72" Jan 29 13:21:51 crc kubenswrapper[4787]: E0129 13:21:51.009372 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72\": container with ID starting with 45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72 not found: ID does not exist" containerID="45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.009406 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72"} err="failed to get container status \"45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72\": rpc error: code = NotFound desc = could not find container \"45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72\": container with ID starting with 45b6f38cdae7e8dcd9d2f72d1715de7e004ee96bbce8249b109d9d2a41e54a72 not found: ID does not exist" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.193170 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.222259 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fzxtm"] Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.227123 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fzxtm"] Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.993055 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" path="/var/lib/kubelet/pods/5c3abece-22a6-44f4-9b3d-77ad9eed03b7/volumes" Jan 29 13:21:51 crc kubenswrapper[4787]: I0129 13:21:51.994422 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" path="/var/lib/kubelet/pods/df99e00f-9a78-454e-9f8e-5da684f374a1/volumes" Jan 29 13:21:58 crc kubenswrapper[4787]: I0129 13:21:58.395094 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness 
probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:21:58 crc kubenswrapper[4787]: I0129 13:21:58.396012 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.194177 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8676955c6b-92dhq"] Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.195406 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" podUID="6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" containerName="controller-manager" containerID="cri-o://92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c" gracePeriod=30 Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.239494 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t"] Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.240901 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" podUID="d98b30cd-c5bf-48e9-880f-e67bd60a2c51" containerName="route-controller-manager" containerID="cri-o://4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660" gracePeriod=30 Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.897598 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.924473 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.962330 4787 generic.go:334] "Generic (PLEG): container finished" podID="d98b30cd-c5bf-48e9-880f-e67bd60a2c51" containerID="4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660" exitCode=0 Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.962399 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.962425 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" event={"ID":"d98b30cd-c5bf-48e9-880f-e67bd60a2c51","Type":"ContainerDied","Data":"4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660"} Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.963334 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t" event={"ID":"d98b30cd-c5bf-48e9-880f-e67bd60a2c51","Type":"ContainerDied","Data":"cb6e59a30242ab54857a8776d5ed02a2951d636951504dd9fb4d258cb8125aae"} Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.963365 4787 scope.go:117] "RemoveContainer" containerID="4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660" Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.967767 4787 generic.go:334] "Generic (PLEG): container finished" podID="6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" containerID="92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c" exitCode=0 Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.967822 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" event={"ID":"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68","Type":"ContainerDied","Data":"92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c"} Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.967858 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" event={"ID":"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68","Type":"ContainerDied","Data":"b62c7ef5b8131432c268b598682c40792cba0db0c7b0664ef94d7647b3b5a04c"} Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.967920 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8676955c6b-92dhq" Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.989109 4787 scope.go:117] "RemoveContainer" containerID="4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660" Jan 29 13:22:08 crc kubenswrapper[4787]: E0129 13:22:08.989899 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660\": container with ID starting with 4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660 not found: ID does not exist" containerID="4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660" Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.989948 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660"} err="failed to get container status \"4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660\": rpc error: code = NotFound desc = could not find container \"4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660\": container with ID starting with 4cbe18d5a7b32f5ce2a6c86f9e2e36797c4b7c8106837aad521ab6ff47ed7660 not found: ID does not exist" Jan 29 13:22:08 crc kubenswrapper[4787]: I0129 13:22:08.989999 4787 scope.go:117] "RemoveContainer" containerID="92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.007440 4787 scope.go:117] "RemoveContainer" containerID="92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c" Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.008522 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c\": container with ID starting with 92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c not found: ID does not exist" containerID="92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.008570 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c"} err="failed to get container status \"92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c\": rpc error: code = NotFound desc = could not find container \"92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c\": container with ID starting with 92ca8d45de8ac7e8e9e9845d946937bdb6c0e7b59b75b44b0160047c9b14a14c not found: ID does not exist" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075263 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-serving-cert\") pod \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075376 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-client-ca\") pod \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075396 4787 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-proxy-ca-bundles\") pod \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075432 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-config\") pod \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075476 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-client-ca\") pod \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075505 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhl5d\" (UniqueName: \"kubernetes.io/projected/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-kube-api-access-rhl5d\") pod \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075546 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m472w\" (UniqueName: \"kubernetes.io/projected/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-kube-api-access-m472w\") pod \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075566 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-config\") pod \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\" (UID: \"d98b30cd-c5bf-48e9-880f-e67bd60a2c51\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.075589 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-serving-cert\") pod \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\" (UID: \"6da1df83-1b31-41dc-b8d2-ef4e7c2efb68\") " Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.077192 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" (UID: "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.077271 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-config" (OuterVolumeSpecName: "config") pod "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" (UID: "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.077325 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-client-ca" (OuterVolumeSpecName: "client-ca") pod "d98b30cd-c5bf-48e9-880f-e67bd60a2c51" (UID: "d98b30cd-c5bf-48e9-880f-e67bd60a2c51"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.077223 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-client-ca" (OuterVolumeSpecName: "client-ca") pod "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" (UID: "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.077217 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-config" (OuterVolumeSpecName: "config") pod "d98b30cd-c5bf-48e9-880f-e67bd60a2c51" (UID: "d98b30cd-c5bf-48e9-880f-e67bd60a2c51"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.082994 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" (UID: "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.083006 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d98b30cd-c5bf-48e9-880f-e67bd60a2c51" (UID: "d98b30cd-c5bf-48e9-880f-e67bd60a2c51"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.083042 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-kube-api-access-m472w" (OuterVolumeSpecName: "kube-api-access-m472w") pod "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" (UID: "6da1df83-1b31-41dc-b8d2-ef4e7c2efb68"). InnerVolumeSpecName "kube-api-access-m472w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.083814 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-kube-api-access-rhl5d" (OuterVolumeSpecName: "kube-api-access-rhl5d") pod "d98b30cd-c5bf-48e9-880f-e67bd60a2c51" (UID: "d98b30cd-c5bf-48e9-880f-e67bd60a2c51"). InnerVolumeSpecName "kube-api-access-rhl5d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176861 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m472w\" (UniqueName: \"kubernetes.io/projected/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-kube-api-access-m472w\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176907 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176923 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176937 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176948 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176962 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176972 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176982 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.176994 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhl5d\" (UniqueName: \"kubernetes.io/projected/d98b30cd-c5bf-48e9-880f-e67bd60a2c51-kube-api-access-rhl5d\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.295828 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t"] Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.300054 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54f9489466-vrz6t"] Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.311946 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8676955c6b-92dhq"] Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.317227 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-8676955c6b-92dhq"] Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635182 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6797996694-vbmn5"] Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.635545 4787 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerName="extract-utilities" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635565 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerName="extract-utilities" Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.635577 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerName="extract-content" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635584 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerName="extract-content" Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.635595 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" containerName="controller-manager" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635602 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" containerName="controller-manager" Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.635616 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerName="registry-server" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635624 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerName="registry-server" Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.635632 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerName="registry-server" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635638 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerName="registry-server" Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.635646 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerName="extract-content" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635652 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerName="extract-content" Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.635669 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerName="extract-utilities" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635675 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerName="extract-utilities" Jan 29 13:22:09 crc kubenswrapper[4787]: E0129 13:22:09.635684 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d98b30cd-c5bf-48e9-880f-e67bd60a2c51" containerName="route-controller-manager" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635690 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d98b30cd-c5bf-48e9-880f-e67bd60a2c51" containerName="route-controller-manager" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635811 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="df99e00f-9a78-454e-9f8e-5da684f374a1" containerName="registry-server" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635822 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d98b30cd-c5bf-48e9-880f-e67bd60a2c51" containerName="route-controller-manager" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 
13:22:09.635837 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" containerName="controller-manager" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.635844 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c3abece-22a6-44f4-9b3d-77ad9eed03b7" containerName="registry-server" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.636468 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.638916 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.639391 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf"] Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.640245 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.640395 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.640576 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.640626 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.640767 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.645504 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.646204 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.646393 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.646522 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.649566 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.649888 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.655906 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.658486 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.658536 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
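
The cpu_manager.go:410 / state_mem.go:107 / memory_manager.go:354 burst above fires when the next pod is admitted: the resource managers walk their checkpointed per-container assignments and drop entries whose pods are gone (here the just-deleted controller-manager and route-controller-manager pods, plus two old catalog pods). A toy Go illustration of that reconciliation under an assumed map-based state, not the kubelet's actual types:

    package main

    import "fmt"

    // staleStateCleaner loosely mimics what the log calls
    // "RemoveStaleState: removing container": drop any (podUID, container)
    // assignment whose pod is no longer in the active set.
    type staleStateCleaner struct {
    	assignments map[string]map[string]string // podUID -> containerName -> cpuset
    }

    func (c *staleStateCleaner) removeStaleState(activePods map[string]bool) {
    	for podUID, containers := range c.assignments {
    		if activePods[podUID] {
    			continue
    		}
    		for name := range containers {
    			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
    		}
    		delete(c.assignments, podUID) // deleting during range is safe in Go
    	}
    }

    func main() {
    	c := &staleStateCleaner{assignments: map[string]map[string]string{
    		"5c3abece-22a6-44f4-9b3d-77ad9eed03b7": {"extract-utilities": "0-1"},
    	}}
    	c.removeStaleState(map[string]bool{}) // no active pods: everything is stale
    }
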
pods=["openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf"] Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.662271 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6797996694-vbmn5"] Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.786841 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fcsq\" (UniqueName: \"kubernetes.io/projected/d89e89f5-53ed-41dd-9db2-e9caf246bebe-kube-api-access-5fcsq\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.786893 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzbrj\" (UniqueName: \"kubernetes.io/projected/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-kube-api-access-mzbrj\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.786926 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-client-ca\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.787076 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d89e89f5-53ed-41dd-9db2-e9caf246bebe-serving-cert\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.787113 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-config\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.787147 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-serving-cert\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.787233 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-config\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.787350 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-proxy-ca-bundles\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.787389 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-client-ca\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.888982 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-client-ca\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.889106 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fcsq\" (UniqueName: \"kubernetes.io/projected/d89e89f5-53ed-41dd-9db2-e9caf246bebe-kube-api-access-5fcsq\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.889144 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzbrj\" (UniqueName: \"kubernetes.io/projected/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-kube-api-access-mzbrj\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.889181 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-client-ca\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.889201 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d89e89f5-53ed-41dd-9db2-e9caf246bebe-serving-cert\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.889225 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-config\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.889250 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-serving-cert\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.889274 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-config\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.889301 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-proxy-ca-bundles\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.890581 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-client-ca\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.890622 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-client-ca\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.890820 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-config\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.891055 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-proxy-ca-bundles\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.891713 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-config\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.893588 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d89e89f5-53ed-41dd-9db2-e9caf246bebe-serving-cert\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " 
pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.895283 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-serving-cert\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.922383 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fcsq\" (UniqueName: \"kubernetes.io/projected/d89e89f5-53ed-41dd-9db2-e9caf246bebe-kube-api-access-5fcsq\") pod \"route-controller-manager-6c9c5cf9b7-ksxrf\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.925138 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzbrj\" (UniqueName: \"kubernetes.io/projected/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-kube-api-access-mzbrj\") pod \"controller-manager-6797996694-vbmn5\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.961589 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:09 crc kubenswrapper[4787]: I0129 13:22:09.973573 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.002248 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6da1df83-1b31-41dc-b8d2-ef4e7c2efb68" path="/var/lib/kubelet/pods/6da1df83-1b31-41dc-b8d2-ef4e7c2efb68/volumes" Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.003105 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d98b30cd-c5bf-48e9-880f-e67bd60a2c51" path="/var/lib/kubelet/pods/d98b30cd-c5bf-48e9-880f-e67bd60a2c51/volumes" Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.278962 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf"] Jan 29 13:22:10 crc kubenswrapper[4787]: W0129 13:22:10.290907 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd89e89f5_53ed_41dd_9db2_e9caf246bebe.slice/crio-9dfda27dcc3d2b076cbe240850c7fc51e39cf4d9d2b28b039f1bdb8e2e4765e8 WatchSource:0}: Error finding container 9dfda27dcc3d2b076cbe240850c7fc51e39cf4d9d2b28b039f1bdb8e2e4765e8: Status 404 returned error can't find the container with id 9dfda27dcc3d2b076cbe240850c7fc51e39cf4d9d2b28b039f1bdb8e2e4765e8 Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.418678 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6797996694-vbmn5"] Jan 29 13:22:10 crc kubenswrapper[4787]: W0129 13:22:10.422238 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podadb4b4b4_5074_4c3f_a8a1_ce8e66d584f6.slice/crio-9f76784f4257174544466b2ee5a72701580d671722bf53f735047dd41d8a1eaa WatchSource:0}: 
Jan 29 13:22:10 crc kubenswrapper[4787]: W0129 13:22:10.422238 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podadb4b4b4_5074_4c3f_a8a1_ce8e66d584f6.slice/crio-9f76784f4257174544466b2ee5a72701580d671722bf53f735047dd41d8a1eaa WatchSource:0}: Error finding container 9f76784f4257174544466b2ee5a72701580d671722bf53f735047dd41d8a1eaa: Status 404 returned error can't find the container with id 9f76784f4257174544466b2ee5a72701580d671722bf53f735047dd41d8a1eaa
Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.984151 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" event={"ID":"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6","Type":"ContainerStarted","Data":"dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c"}
Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.984566 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" event={"ID":"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6","Type":"ContainerStarted","Data":"9f76784f4257174544466b2ee5a72701580d671722bf53f735047dd41d8a1eaa"}
Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.984646 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5"
Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.987610 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" event={"ID":"d89e89f5-53ed-41dd-9db2-e9caf246bebe","Type":"ContainerStarted","Data":"047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771"}
Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.987668 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" event={"ID":"d89e89f5-53ed-41dd-9db2-e9caf246bebe","Type":"ContainerStarted","Data":"9dfda27dcc3d2b076cbe240850c7fc51e39cf4d9d2b28b039f1bdb8e2e4765e8"}
Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.987883 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf"
Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.990547 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5"
Jan 29 13:22:10 crc kubenswrapper[4787]: I0129 13:22:10.999593 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf"
Jan 29 13:22:11 crc kubenswrapper[4787]: I0129 13:22:11.012435 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" podStartSLOduration=3.012408227 podStartE2EDuration="3.012408227s" podCreationTimestamp="2026-01-29 13:22:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:22:11.011470588 +0000 UTC m=+369.772730864" watchObservedRunningTime="2026-01-29 13:22:11.012408227 +0000 UTC m=+369.773668503"
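
pod_startup_latency_tracker reports podStartSLOduration as the distance from podCreationTimestamp to the observed running time, with the image-pull window available to exclude (here firstStartedPulling/lastFinishedPulling are the zero time because no pull happened). Judging purely from the numbers above, the 3.012408227 figure is watchObservedRunningTime minus creation; a quick Go check with the logged timestamps:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Timestamps copied from the controller-manager-6797996694-vbmn5 record above.
    	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
    	created, err := time.Parse(layout, "2026-01-29 13:22:08 +0000 UTC")
    	if err != nil {
    		panic(err)
    	}
    	watched, err := time.Parse(layout, "2026-01-29 13:22:11.012408227 +0000 UTC")
    	if err != nil {
    		panic(err)
    	}
    	// No image pull happened (firstStartedPulling is the zero time), so the
    	// whole window counts: this prints 3.012408227, the logged value.
    	fmt.Println(watched.Sub(created).Seconds())
    }
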
m=+369.802862510" watchObservedRunningTime="2026-01-29 13:22:11.043295566 +0000 UTC m=+369.804555842" Jan 29 13:22:27 crc kubenswrapper[4787]: I0129 13:22:27.597578 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qg2fk"] Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.260107 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6797996694-vbmn5"] Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.260396 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" podUID="adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" containerName="controller-manager" containerID="cri-o://dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c" gracePeriod=30 Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.358034 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf"] Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.358788 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" podUID="d89e89f5-53ed-41dd-9db2-e9caf246bebe" containerName="route-controller-manager" containerID="cri-o://047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771" gracePeriod=30 Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.394515 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.394586 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.898787 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.902501 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.985691 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d89e89f5-53ed-41dd-9db2-e9caf246bebe-serving-cert\") pod \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.985760 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-client-ca\") pod \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.985805 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-config\") pod \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.985884 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fcsq\" (UniqueName: \"kubernetes.io/projected/d89e89f5-53ed-41dd-9db2-e9caf246bebe-kube-api-access-5fcsq\") pod \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.985922 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-client-ca\") pod \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.985968 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-config\") pod \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\" (UID: \"d89e89f5-53ed-41dd-9db2-e9caf246bebe\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.986003 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-proxy-ca-bundles\") pod \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.986037 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzbrj\" (UniqueName: \"kubernetes.io/projected/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-kube-api-access-mzbrj\") pod \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.986086 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-serving-cert\") pod \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\" (UID: \"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6\") " Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.987358 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-client-ca" (OuterVolumeSpecName: "client-ca") pod "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" (UID: 
"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.987476 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-client-ca" (OuterVolumeSpecName: "client-ca") pod "d89e89f5-53ed-41dd-9db2-e9caf246bebe" (UID: "d89e89f5-53ed-41dd-9db2-e9caf246bebe"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.987996 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" (UID: "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.988104 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-config" (OuterVolumeSpecName: "config") pod "d89e89f5-53ed-41dd-9db2-e9caf246bebe" (UID: "d89e89f5-53ed-41dd-9db2-e9caf246bebe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.988617 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-config" (OuterVolumeSpecName: "config") pod "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" (UID: "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.994002 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d89e89f5-53ed-41dd-9db2-e9caf246bebe-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d89e89f5-53ed-41dd-9db2-e9caf246bebe" (UID: "d89e89f5-53ed-41dd-9db2-e9caf246bebe"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.994047 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" (UID: "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.994101 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d89e89f5-53ed-41dd-9db2-e9caf246bebe-kube-api-access-5fcsq" (OuterVolumeSpecName: "kube-api-access-5fcsq") pod "d89e89f5-53ed-41dd-9db2-e9caf246bebe" (UID: "d89e89f5-53ed-41dd-9db2-e9caf246bebe"). InnerVolumeSpecName "kube-api-access-5fcsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:22:28 crc kubenswrapper[4787]: I0129 13:22:28.994170 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-kube-api-access-mzbrj" (OuterVolumeSpecName: "kube-api-access-mzbrj") pod "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" (UID: "adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6"). 
InnerVolumeSpecName "kube-api-access-mzbrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087733 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fcsq\" (UniqueName: \"kubernetes.io/projected/d89e89f5-53ed-41dd-9db2-e9caf246bebe-kube-api-access-5fcsq\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087767 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087779 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087790 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087799 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzbrj\" (UniqueName: \"kubernetes.io/projected/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-kube-api-access-mzbrj\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087811 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087819 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d89e89f5-53ed-41dd-9db2-e9caf246bebe-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087829 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d89e89f5-53ed-41dd-9db2-e9caf246bebe-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.087838 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.096577 4787 generic.go:334] "Generic (PLEG): container finished" podID="adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" containerID="dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c" exitCode=0 Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.096621 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" event={"ID":"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6","Type":"ContainerDied","Data":"dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c"} Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.096672 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" event={"ID":"adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6","Type":"ContainerDied","Data":"9f76784f4257174544466b2ee5a72701580d671722bf53f735047dd41d8a1eaa"} Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.096687 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6797996694-vbmn5" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.096697 4787 scope.go:117] "RemoveContainer" containerID="dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.098844 4787 generic.go:334] "Generic (PLEG): container finished" podID="d89e89f5-53ed-41dd-9db2-e9caf246bebe" containerID="047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771" exitCode=0 Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.098876 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" event={"ID":"d89e89f5-53ed-41dd-9db2-e9caf246bebe","Type":"ContainerDied","Data":"047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771"} Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.098900 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" event={"ID":"d89e89f5-53ed-41dd-9db2-e9caf246bebe","Type":"ContainerDied","Data":"9dfda27dcc3d2b076cbe240850c7fc51e39cf4d9d2b28b039f1bdb8e2e4765e8"} Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.098969 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.119749 4787 scope.go:117] "RemoveContainer" containerID="dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c" Jan 29 13:22:29 crc kubenswrapper[4787]: E0129 13:22:29.120445 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c\": container with ID starting with dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c not found: ID does not exist" containerID="dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.120746 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c"} err="failed to get container status \"dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c\": rpc error: code = NotFound desc = could not find container \"dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c\": container with ID starting with dd794c4757ae86214611e831025ee1fd75bf204eb5b7013dd1fed1c483ada13c not found: ID does not exist" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.120788 4787 scope.go:117] "RemoveContainer" containerID="047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.135655 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf"] Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.143692 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6c9c5cf9b7-ksxrf"] Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.147674 4787 scope.go:117] "RemoveContainer" containerID="047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.147788 4787 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6797996694-vbmn5"] Jan 29 13:22:29 crc kubenswrapper[4787]: E0129 13:22:29.148495 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771\": container with ID starting with 047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771 not found: ID does not exist" containerID="047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.148642 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771"} err="failed to get container status \"047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771\": rpc error: code = NotFound desc = could not find container \"047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771\": container with ID starting with 047b2f9a21568648b6f3f84933f68bfd5e288f853ccdd6e04f13fb96463f6771 not found: ID does not exist" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.151091 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6797996694-vbmn5"] Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.650978 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm"] Jan 29 13:22:29 crc kubenswrapper[4787]: E0129 13:22:29.651394 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" containerName="controller-manager" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.651416 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" containerName="controller-manager" Jan 29 13:22:29 crc kubenswrapper[4787]: E0129 13:22:29.651446 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d89e89f5-53ed-41dd-9db2-e9caf246bebe" containerName="route-controller-manager" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.651471 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d89e89f5-53ed-41dd-9db2-e9caf246bebe" containerName="route-controller-manager" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.651610 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" containerName="controller-manager" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.651628 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d89e89f5-53ed-41dd-9db2-e9caf246bebe" containerName="route-controller-manager" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.652416 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.656773 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd"] Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.663106 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.665188 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.665748 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.666159 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.666709 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.667364 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.667894 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.671927 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.679198 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.707144 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.707282 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.707557 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.707893 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.708824 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ff5d48f-f41a-4d59-82e2-4c31607237d9-serving-cert\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.708880 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-proxy-ca-bundles\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.708923 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-serving-cert\") pod 
\"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.708952 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n85bz\" (UniqueName: \"kubernetes.io/projected/5ff5d48f-f41a-4d59-82e2-4c31607237d9-kube-api-access-n85bz\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.709004 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v59k\" (UniqueName: \"kubernetes.io/projected/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-kube-api-access-8v59k\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.709039 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-config\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.709076 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-client-ca\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.709878 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.709997 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-client-ca\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.710153 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-config\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.712873 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm"] Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.717432 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd"] Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811591 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-8v59k\" (UniqueName: \"kubernetes.io/projected/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-kube-api-access-8v59k\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811681 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-config\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811721 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-client-ca\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811767 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-client-ca\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811817 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-config\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811856 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ff5d48f-f41a-4d59-82e2-4c31607237d9-serving-cert\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811886 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-proxy-ca-bundles\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811917 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-serving-cert\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.811947 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n85bz\" (UniqueName: \"kubernetes.io/projected/5ff5d48f-f41a-4d59-82e2-4c31607237d9-kube-api-access-n85bz\") pod 
\"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.813988 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-client-ca\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.814720 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-config\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.815772 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-config\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.815856 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-client-ca\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.816335 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-proxy-ca-bundles\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.821132 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-serving-cert\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.825859 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ff5d48f-f41a-4d59-82e2-4c31607237d9-serving-cert\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.833533 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v59k\" (UniqueName: \"kubernetes.io/projected/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-kube-api-access-8v59k\") pod \"controller-manager-67d5c8dc4f-7g9tm\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 
13:22:29.838215 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n85bz\" (UniqueName: \"kubernetes.io/projected/5ff5d48f-f41a-4d59-82e2-4c31607237d9-kube-api-access-n85bz\") pod \"route-controller-manager-6758945cb4-fqcjd\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.995654 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6" path="/var/lib/kubelet/pods/adb4b4b4-5074-4c3f-a8a1-ce8e66d584f6/volumes" Jan 29 13:22:29 crc kubenswrapper[4787]: I0129 13:22:29.996168 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d89e89f5-53ed-41dd-9db2-e9caf246bebe" path="/var/lib/kubelet/pods/d89e89f5-53ed-41dd-9db2-e9caf246bebe/volumes" Jan 29 13:22:30 crc kubenswrapper[4787]: I0129 13:22:30.017421 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:30 crc kubenswrapper[4787]: I0129 13:22:30.029200 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:30 crc kubenswrapper[4787]: I0129 13:22:30.469710 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm"] Jan 29 13:22:30 crc kubenswrapper[4787]: I0129 13:22:30.535648 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd"] Jan 29 13:22:30 crc kubenswrapper[4787]: W0129 13:22:30.546771 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ff5d48f_f41a_4d59_82e2_4c31607237d9.slice/crio-43f9d46e44a0e007af4ea2d52134971cc12a38d094cbb60944b2b621cfddab89 WatchSource:0}: Error finding container 43f9d46e44a0e007af4ea2d52134971cc12a38d094cbb60944b2b621cfddab89: Status 404 returned error can't find the container with id 43f9d46e44a0e007af4ea2d52134971cc12a38d094cbb60944b2b621cfddab89 Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.121260 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" event={"ID":"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a","Type":"ContainerStarted","Data":"b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe"} Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.121719 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.121735 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" event={"ID":"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a","Type":"ContainerStarted","Data":"b49c3317146e264a9499ce9b152d3f15ac2670648ba16ce790e4e5aeb8e6dfb8"} Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.124076 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" event={"ID":"5ff5d48f-f41a-4d59-82e2-4c31607237d9","Type":"ContainerStarted","Data":"901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b"} Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 
13:22:31.124108 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" event={"ID":"5ff5d48f-f41a-4d59-82e2-4c31607237d9","Type":"ContainerStarted","Data":"43f9d46e44a0e007af4ea2d52134971cc12a38d094cbb60944b2b621cfddab89"} Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.124304 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.126776 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.131812 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.169372 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" podStartSLOduration=3.169348853 podStartE2EDuration="3.169348853s" podCreationTimestamp="2026-01-29 13:22:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:22:31.148050388 +0000 UTC m=+389.909310674" watchObservedRunningTime="2026-01-29 13:22:31.169348853 +0000 UTC m=+389.930609119" Jan 29 13:22:31 crc kubenswrapper[4787]: I0129 13:22:31.194969 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" podStartSLOduration=3.19494635 podStartE2EDuration="3.19494635s" podCreationTimestamp="2026-01-29 13:22:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:22:31.194183306 +0000 UTC m=+389.955443582" watchObservedRunningTime="2026-01-29 13:22:31.19494635 +0000 UTC m=+389.956206626" Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.203509 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm"] Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.204728 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" podUID="f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" containerName="controller-manager" containerID="cri-o://b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe" gracePeriod=30 Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.240268 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd"] Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.240598 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" podUID="5ff5d48f-f41a-4d59-82e2-4c31607237d9" containerName="route-controller-manager" containerID="cri-o://901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b" gracePeriod=30 Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.821825 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.881152 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.932305 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n85bz\" (UniqueName: \"kubernetes.io/projected/5ff5d48f-f41a-4d59-82e2-4c31607237d9-kube-api-access-n85bz\") pod \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.932426 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-config\") pod \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.932479 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ff5d48f-f41a-4d59-82e2-4c31607237d9-serving-cert\") pod \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.932516 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-client-ca\") pod \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\" (UID: \"5ff5d48f-f41a-4d59-82e2-4c31607237d9\") " Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.933819 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-config" (OuterVolumeSpecName: "config") pod "5ff5d48f-f41a-4d59-82e2-4c31607237d9" (UID: "5ff5d48f-f41a-4d59-82e2-4c31607237d9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.935136 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5ff5d48f-f41a-4d59-82e2-4c31607237d9" (UID: "5ff5d48f-f41a-4d59-82e2-4c31607237d9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.940114 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ff5d48f-f41a-4d59-82e2-4c31607237d9-kube-api-access-n85bz" (OuterVolumeSpecName: "kube-api-access-n85bz") pod "5ff5d48f-f41a-4d59-82e2-4c31607237d9" (UID: "5ff5d48f-f41a-4d59-82e2-4c31607237d9"). InnerVolumeSpecName "kube-api-access-n85bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:22:48 crc kubenswrapper[4787]: I0129 13:22:48.940184 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ff5d48f-f41a-4d59-82e2-4c31607237d9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5ff5d48f-f41a-4d59-82e2-4c31607237d9" (UID: "5ff5d48f-f41a-4d59-82e2-4c31607237d9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.033769 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-serving-cert\") pod \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.033883 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-client-ca\") pod \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.034074 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-config\") pod \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.034176 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v59k\" (UniqueName: \"kubernetes.io/projected/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-kube-api-access-8v59k\") pod \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.034244 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-proxy-ca-bundles\") pod \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\" (UID: \"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a\") " Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.034647 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ff5d48f-f41a-4d59-82e2-4c31607237d9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.034669 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.034684 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n85bz\" (UniqueName: \"kubernetes.io/projected/5ff5d48f-f41a-4d59-82e2-4c31607237d9-kube-api-access-n85bz\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.034718 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ff5d48f-f41a-4d59-82e2-4c31607237d9-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.035518 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-client-ca" (OuterVolumeSpecName: "client-ca") pod "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" (UID: "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.035616 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-config" (OuterVolumeSpecName: "config") pod "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" (UID: "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.035713 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" (UID: "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.038444 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-kube-api-access-8v59k" (OuterVolumeSpecName: "kube-api-access-8v59k") pod "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" (UID: "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a"). InnerVolumeSpecName "kube-api-access-8v59k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.038551 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" (UID: "f50ea2e6-6221-4b40-844e-86ecf8fa7f1a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.135826 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.135867 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v59k\" (UniqueName: \"kubernetes.io/projected/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-kube-api-access-8v59k\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.135882 4787 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.135894 4787 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.135905 4787 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.239095 4787 generic.go:334] "Generic (PLEG): container finished" podID="f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" containerID="b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe" exitCode=0 Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.239198 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.239229 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" event={"ID":"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a","Type":"ContainerDied","Data":"b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe"} Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.239549 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm" event={"ID":"f50ea2e6-6221-4b40-844e-86ecf8fa7f1a","Type":"ContainerDied","Data":"b49c3317146e264a9499ce9b152d3f15ac2670648ba16ce790e4e5aeb8e6dfb8"} Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.239577 4787 scope.go:117] "RemoveContainer" containerID="b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.242621 4787 generic.go:334] "Generic (PLEG): container finished" podID="5ff5d48f-f41a-4d59-82e2-4c31607237d9" containerID="901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b" exitCode=0 Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.242679 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" event={"ID":"5ff5d48f-f41a-4d59-82e2-4c31607237d9","Type":"ContainerDied","Data":"901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b"} Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.242713 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" event={"ID":"5ff5d48f-f41a-4d59-82e2-4c31607237d9","Type":"ContainerDied","Data":"43f9d46e44a0e007af4ea2d52134971cc12a38d094cbb60944b2b621cfddab89"} Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.242779 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.268990 4787 scope.go:117] "RemoveContainer" containerID="b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe" Jan 29 13:22:49 crc kubenswrapper[4787]: E0129 13:22:49.270048 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe\": container with ID starting with b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe not found: ID does not exist" containerID="b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.270137 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe"} err="failed to get container status \"b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe\": rpc error: code = NotFound desc = could not find container \"b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe\": container with ID starting with b6c99b7da59e5ad55099c5fe0e4b8f47a48f3ea4ada3da643086033aaa340ffe not found: ID does not exist" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.270195 4787 scope.go:117] "RemoveContainer" containerID="901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.287624 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm"] Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.293632 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-67d5c8dc4f-7g9tm"] Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.302089 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd"] Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.304026 4787 scope.go:117] "RemoveContainer" containerID="901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b" Jan 29 13:22:49 crc kubenswrapper[4787]: E0129 13:22:49.304844 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b\": container with ID starting with 901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b not found: ID does not exist" containerID="901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.304911 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b"} err="failed to get container status \"901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b\": rpc error: code = NotFound desc = could not find container \"901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b\": container with ID starting with 901eda08220c46805e6a1971b9cca7b690c66525ac12c91a4a55328e3e10688b not found: ID does not exist" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.306849 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6758945cb4-fqcjd"] Jan 29 
13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.675690 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-75488b5d77-ht9b8"] Jan 29 13:22:49 crc kubenswrapper[4787]: E0129 13:22:49.676166 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ff5d48f-f41a-4d59-82e2-4c31607237d9" containerName="route-controller-manager" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.676187 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ff5d48f-f41a-4d59-82e2-4c31607237d9" containerName="route-controller-manager" Jan 29 13:22:49 crc kubenswrapper[4787]: E0129 13:22:49.676213 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" containerName="controller-manager" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.676220 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" containerName="controller-manager" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.676389 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" containerName="controller-manager" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.676411 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ff5d48f-f41a-4d59-82e2-4c31607237d9" containerName="route-controller-manager" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.677114 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.685278 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.686793 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.687076 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.687333 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.687890 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk"] Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.689351 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.691308 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.691901 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.693592 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.698331 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.698535 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk"] Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.698805 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.699066 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.699212 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.699350 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.699672 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.706169 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-75488b5d77-ht9b8"] Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.847911 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-config\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.848006 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a21fb2-4be2-424f-bf63-c64cf7750a69-config\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.848050 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcsnt\" (UniqueName: \"kubernetes.io/projected/7818e1ce-208d-414a-8c0b-a991daa65d2c-kube-api-access-zcsnt\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 
13:22:49.848078 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpr59\" (UniqueName: \"kubernetes.io/projected/80a21fb2-4be2-424f-bf63-c64cf7750a69-kube-api-access-bpr59\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.848111 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-proxy-ca-bundles\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.848141 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7818e1ce-208d-414a-8c0b-a991daa65d2c-serving-cert\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.848168 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/80a21fb2-4be2-424f-bf63-c64cf7750a69-serving-cert\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.848203 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-client-ca\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.848255 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/80a21fb2-4be2-424f-bf63-c64cf7750a69-client-ca\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.948991 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcsnt\" (UniqueName: \"kubernetes.io/projected/7818e1ce-208d-414a-8c0b-a991daa65d2c-kube-api-access-zcsnt\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.949045 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpr59\" (UniqueName: \"kubernetes.io/projected/80a21fb2-4be2-424f-bf63-c64cf7750a69-kube-api-access-bpr59\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc 
kubenswrapper[4787]: I0129 13:22:49.949076 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-proxy-ca-bundles\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.949102 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7818e1ce-208d-414a-8c0b-a991daa65d2c-serving-cert\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.949128 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/80a21fb2-4be2-424f-bf63-c64cf7750a69-serving-cert\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.949830 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-client-ca\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.949867 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/80a21fb2-4be2-424f-bf63-c64cf7750a69-client-ca\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.950061 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-config\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.950138 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a21fb2-4be2-424f-bf63-c64cf7750a69-config\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.950354 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-proxy-ca-bundles\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.950883 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-client-ca\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.951281 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/80a21fb2-4be2-424f-bf63-c64cf7750a69-client-ca\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.951502 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a21fb2-4be2-424f-bf63-c64cf7750a69-config\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.952083 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7818e1ce-208d-414a-8c0b-a991daa65d2c-config\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.954688 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7818e1ce-208d-414a-8c0b-a991daa65d2c-serving-cert\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.956231 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/80a21fb2-4be2-424f-bf63-c64cf7750a69-serving-cert\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.968646 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcsnt\" (UniqueName: \"kubernetes.io/projected/7818e1ce-208d-414a-8c0b-a991daa65d2c-kube-api-access-zcsnt\") pod \"controller-manager-75488b5d77-ht9b8\" (UID: \"7818e1ce-208d-414a-8c0b-a991daa65d2c\") " pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.969543 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpr59\" (UniqueName: \"kubernetes.io/projected/80a21fb2-4be2-424f-bf63-c64cf7750a69-kube-api-access-bpr59\") pod \"route-controller-manager-7d594996db-8mmdk\" (UID: \"80a21fb2-4be2-424f-bf63-c64cf7750a69\") " pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.992428 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ff5d48f-f41a-4d59-82e2-4c31607237d9" path="/var/lib/kubelet/pods/5ff5d48f-f41a-4d59-82e2-4c31607237d9/volumes" Jan 29 13:22:49 crc kubenswrapper[4787]: I0129 13:22:49.993063 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="f50ea2e6-6221-4b40-844e-86ecf8fa7f1a" path="/var/lib/kubelet/pods/f50ea2e6-6221-4b40-844e-86ecf8fa7f1a/volumes" Jan 29 13:22:50 crc kubenswrapper[4787]: I0129 13:22:50.016283 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:50 crc kubenswrapper[4787]: I0129 13:22:50.028939 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:50 crc kubenswrapper[4787]: I0129 13:22:50.296797 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk"] Jan 29 13:22:50 crc kubenswrapper[4787]: W0129 13:22:50.305414 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80a21fb2_4be2_424f_bf63_c64cf7750a69.slice/crio-e8080d366cb3d7a5100cba17057dab23f71f77182a55e5511dba933b961aad28 WatchSource:0}: Error finding container e8080d366cb3d7a5100cba17057dab23f71f77182a55e5511dba933b961aad28: Status 404 returned error can't find the container with id e8080d366cb3d7a5100cba17057dab23f71f77182a55e5511dba933b961aad28 Jan 29 13:22:50 crc kubenswrapper[4787]: I0129 13:22:50.348185 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-75488b5d77-ht9b8"] Jan 29 13:22:50 crc kubenswrapper[4787]: W0129 13:22:50.348615 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7818e1ce_208d_414a_8c0b_a991daa65d2c.slice/crio-6addf9d2cfab277d9d69ef56b8304717ebf9573b7a52acd6f5a9c01bc4c8de2e WatchSource:0}: Error finding container 6addf9d2cfab277d9d69ef56b8304717ebf9573b7a52acd6f5a9c01bc4c8de2e: Status 404 returned error can't find the container with id 6addf9d2cfab277d9d69ef56b8304717ebf9573b7a52acd6f5a9c01bc4c8de2e Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.267229 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" event={"ID":"7818e1ce-208d-414a-8c0b-a991daa65d2c","Type":"ContainerStarted","Data":"ceb8dd64d16af5473b784275a3ea2ad150d2bd8ad9de2c2afead516c1369b47d"} Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.267880 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" event={"ID":"7818e1ce-208d-414a-8c0b-a991daa65d2c","Type":"ContainerStarted","Data":"6addf9d2cfab277d9d69ef56b8304717ebf9573b7a52acd6f5a9c01bc4c8de2e"} Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.268221 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.272404 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" event={"ID":"80a21fb2-4be2-424f-bf63-c64cf7750a69","Type":"ContainerStarted","Data":"271f23fc619caba7e4b393896327e50f5b1760f7125dc0bf185e67630ebe3090"} Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.272503 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" 
event={"ID":"80a21fb2-4be2-424f-bf63-c64cf7750a69","Type":"ContainerStarted","Data":"e8080d366cb3d7a5100cba17057dab23f71f77182a55e5511dba933b961aad28"} Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.272884 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.279322 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.286188 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.294283 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-75488b5d77-ht9b8" podStartSLOduration=3.294252922 podStartE2EDuration="3.294252922s" podCreationTimestamp="2026-01-29 13:22:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:22:51.288309806 +0000 UTC m=+410.049570082" watchObservedRunningTime="2026-01-29 13:22:51.294252922 +0000 UTC m=+410.055513208" Jan 29 13:22:51 crc kubenswrapper[4787]: I0129 13:22:51.331336 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7d594996db-8mmdk" podStartSLOduration=3.331300639 podStartE2EDuration="3.331300639s" podCreationTimestamp="2026-01-29 13:22:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:22:51.330433723 +0000 UTC m=+410.091694009" watchObservedRunningTime="2026-01-29 13:22:51.331300639 +0000 UTC m=+410.092560915" Jan 29 13:22:52 crc kubenswrapper[4787]: I0129 13:22:52.622378 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" podUID="24a38653-de36-438f-a9d7-fde6f094004f" containerName="oauth-openshift" containerID="cri-o://4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74" gracePeriod=15 Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.083533 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.203200 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-ocp-branding-template\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.204848 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-audit-policies\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.204878 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-login\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.204898 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-provider-selection\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.204944 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cm6rj\" (UniqueName: \"kubernetes.io/projected/24a38653-de36-438f-a9d7-fde6f094004f-kube-api-access-cm6rj\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.204979 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-error\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.205008 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-service-ca\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.205073 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-session\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.205098 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-idp-0-file-data\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") " Jan 29 
13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.205134 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-serving-cert\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") "
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.205171 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-trusted-ca-bundle\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") "
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.205261 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/24a38653-de36-438f-a9d7-fde6f094004f-audit-dir\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") "
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.205306 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-router-certs\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") "
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.205325 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-cliconfig\") pod \"24a38653-de36-438f-a9d7-fde6f094004f\" (UID: \"24a38653-de36-438f-a9d7-fde6f094004f\") "
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.206045 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.206054 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.206341 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24a38653-de36-438f-a9d7-fde6f094004f-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.206425 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.207111 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.211119 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24a38653-de36-438f-a9d7-fde6f094004f-kube-api-access-cm6rj" (OuterVolumeSpecName: "kube-api-access-cm6rj") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "kube-api-access-cm6rj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.211275 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.212820 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.213061 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.213430 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.215902 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.216163 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.216578 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.216725 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "24a38653-de36-438f-a9d7-fde6f094004f" (UID: "24a38653-de36-438f-a9d7-fde6f094004f"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.286975 4787 generic.go:334] "Generic (PLEG): container finished" podID="24a38653-de36-438f-a9d7-fde6f094004f" containerID="4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74" exitCode=0
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.287928 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk"
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.289816 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" event={"ID":"24a38653-de36-438f-a9d7-fde6f094004f","Type":"ContainerDied","Data":"4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74"}
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.289966 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qg2fk" event={"ID":"24a38653-de36-438f-a9d7-fde6f094004f","Type":"ContainerDied","Data":"380e7f77ff5d33b919ee64ff944cf5bc45fa5a2aae046d19f148f1fd89ab1820"}
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.290059 4787 scope.go:117] "RemoveContainer" containerID="4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74"
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.306967 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cm6rj\" (UniqueName: \"kubernetes.io/projected/24a38653-de36-438f-a9d7-fde6f094004f-kube-api-access-cm6rj\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307004 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307020 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307044 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307061 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307075 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307090 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307105 4787 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/24a38653-de36-438f-a9d7-fde6f094004f-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307120 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307136 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307153 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307168 4787 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/24a38653-de36-438f-a9d7-fde6f094004f-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307183 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.307198 4787 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/24a38653-de36-438f-a9d7-fde6f094004f-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.310481 4787 scope.go:117] "RemoveContainer" containerID="4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74"
Jan 29 13:22:53 crc kubenswrapper[4787]: E0129 13:22:53.311215 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74\": container with ID starting with 4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74 not found: ID does not exist" containerID="4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74"
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.311271 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74"} err="failed to get container status \"4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74\": rpc error: code = NotFound desc = could not find container \"4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74\": container with ID starting with 4b6d8f083789ca3eb618cf4da51a7ff7b8c05ea10616657b80bf3e327e5bcf74 not found: ID does not exist"
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.343437 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qg2fk"]
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.347636 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qg2fk"]
Jan 29 13:22:53 crc kubenswrapper[4787]: I0129 13:22:53.999023 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24a38653-de36-438f-a9d7-fde6f094004f" path="/var/lib/kubelet/pods/24a38653-de36-438f-a9d7-fde6f094004f/volumes"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.672014 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-566b7b5974-sqjft"]
Jan 29 13:22:57 crc kubenswrapper[4787]: E0129 13:22:57.672593 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24a38653-de36-438f-a9d7-fde6f094004f" containerName="oauth-openshift"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.672607 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="24a38653-de36-438f-a9d7-fde6f094004f" containerName="oauth-openshift"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.672739 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="24a38653-de36-438f-a9d7-fde6f094004f" containerName="oauth-openshift"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.673262 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.675676 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.676550 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.676862 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.676937 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.678404 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.678431 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.678487 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.678785 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.679844 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.679936 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.680083 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.681108 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.690093 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.695346 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-566b7b5974-sqjft"]
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.704954 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.712245 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768106 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-875jz\" (UniqueName: \"kubernetes.io/projected/5c2f46c4-a4cb-4442-8e75-65e67c463833-kube-api-access-875jz\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768205 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768241 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-router-certs\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768266 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-session\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768334 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768367 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-audit-policies\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768394 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-error\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768411 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-serving-cert\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768432 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768576 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768606 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-cliconfig\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768718 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c2f46c4-a4cb-4442-8e75-65e67c463833-audit-dir\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768815 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-login\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.768858 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-service-ca\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870506 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870565 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-router-certs\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870621 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-session\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870645 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870677 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-audit-policies\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870700 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-error\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870721 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-serving-cert\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870742 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870761 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870778 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-cliconfig\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870799 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c2f46c4-a4cb-4442-8e75-65e67c463833-audit-dir\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870820 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-login\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870848 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-service-ca\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.870870 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-875jz\" (UniqueName: \"kubernetes.io/projected/5c2f46c4-a4cb-4442-8e75-65e67c463833-kube-api-access-875jz\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.871581 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c2f46c4-a4cb-4442-8e75-65e67c463833-audit-dir\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.872178 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.872342 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-audit-policies\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.872762 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-service-ca\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.874046 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-cliconfig\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.878515 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-serving-cert\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.878882 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-router-certs\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.878875 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-session\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.879602 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-login\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.879944 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.880031 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-error\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.882338 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.882760 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c2f46c4-a4cb-4442-8e75-65e67c463833-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:57 crc kubenswrapper[4787]: I0129 13:22:57.890143 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-875jz\" (UniqueName: \"kubernetes.io/projected/5c2f46c4-a4cb-4442-8e75-65e67c463833-kube-api-access-875jz\") pod \"oauth-openshift-566b7b5974-sqjft\" (UID: \"5c2f46c4-a4cb-4442-8e75-65e67c463833\") " pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:58 crc kubenswrapper[4787]: I0129 13:22:58.044168 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:58 crc kubenswrapper[4787]: I0129 13:22:58.394997 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:22:58 crc kubenswrapper[4787]: I0129 13:22:58.395075 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:22:58 crc kubenswrapper[4787]: I0129 13:22:58.395132 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn"
Jan 29 13:22:58 crc kubenswrapper[4787]: I0129 13:22:58.395822 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7ba8854d1c9db1088e3ba2267b52c1d63c0e87ec784685a87fb7723189cff447"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 13:22:58 crc kubenswrapper[4787]: I0129 13:22:58.395882 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://7ba8854d1c9db1088e3ba2267b52c1d63c0e87ec784685a87fb7723189cff447" gracePeriod=600
Jan 29 13:22:58 crc kubenswrapper[4787]: I0129 13:22:58.504026 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-566b7b5974-sqjft"]
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.328321 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="7ba8854d1c9db1088e3ba2267b52c1d63c0e87ec784685a87fb7723189cff447" exitCode=0
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.328509 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"7ba8854d1c9db1088e3ba2267b52c1d63c0e87ec784685a87fb7723189cff447"}
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.328924 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"bb5ac9dd4e381fa95aa4a3ebd35c4933b0fa395c23151c515966ae47561636f5"}
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.328948 4787 scope.go:117] "RemoveContainer" containerID="a1a936bdbda8d9095ba9d28987c71f5334b08d54309471c50e020d5df39001ef"
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.333818 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft" event={"ID":"5c2f46c4-a4cb-4442-8e75-65e67c463833","Type":"ContainerStarted","Data":"7b3d8c73b0df57acee93a4835a46159727021c292155d986d01f9b43aa1fc9a5"}
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.333878 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft" event={"ID":"5c2f46c4-a4cb-4442-8e75-65e67c463833","Type":"ContainerStarted","Data":"9a156e79b56ed4840665034ccfde10507f01de54515ed720862b0948923132b3"}
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.334551 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.372178 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft" podStartSLOduration=32.37215224 podStartE2EDuration="32.37215224s" podCreationTimestamp="2026-01-29 13:22:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:22:59.371034437 +0000 UTC m=+418.132294733" watchObservedRunningTime="2026-01-29 13:22:59.37215224 +0000 UTC m=+418.133412516"
Jan 29 13:22:59 crc kubenswrapper[4787]: I0129 13:22:59.527195 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-566b7b5974-sqjft"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.340514 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z6slf"]
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.341711 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z6slf" podUID="60be26cc-9957-4401-85dd-7572bb78975f" containerName="registry-server" containerID="cri-o://70872f43e234762504fa14291d7bc5bd220dcd2843936b11aee5e0958022b4ce" gracePeriod=30
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.354959 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pww2l"]
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.355293 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pww2l" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerName="registry-server" containerID="cri-o://cbb19a2bd8809579d61db7fce3fbd04990a2652440cc2f7a735a7a5300f9c3f4" gracePeriod=30
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.364210 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fxcnw"]
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.364495 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" podUID="ceef637f-9cff-4fce-95d5-7174181e363d" containerName="marketplace-operator" containerID="cri-o://c17994ae0e03bf9a5bcc584e40d456b280539099db9d99b70cfe2fb8b35aa5dd" gracePeriod=30
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.380565 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n44gv"]
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.380860 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n44gv" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerName="registry-server" containerID="cri-o://29e282650465855593933e61ba2cb1dc7b8fb2cfab016f1ce5e3e6347bd65b64" gracePeriod=30
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.403675 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-87tmc"]
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.403967 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-87tmc" podUID="d376a31e-47be-4275-a440-5a961fb875d3" containerName="registry-server" containerID="cri-o://73dcb7c76033f7513ba7114a5cd026fdcb64f5090efa355c843a65a2be7d67d9" gracePeriod=30
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.409701 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kvljk"]
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.410670 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.440933 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kvljk"]
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.573917 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blqjl\" (UniqueName: \"kubernetes.io/projected/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-kube-api-access-blqjl\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.573982 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.574059 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.675493 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blqjl\" (UniqueName: \"kubernetes.io/projected/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-kube-api-access-blqjl\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.675554 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.675620 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.677411 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.685946 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.694292 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blqjl\" (UniqueName: \"kubernetes.io/projected/b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7-kube-api-access-blqjl\") pod \"marketplace-operator-79b997595-kvljk\" (UID: \"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:19 crc kubenswrapper[4787]: I0129 13:23:19.727854 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.188297 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-kvljk"]
Jan 29 13:23:20 crc kubenswrapper[4787]: W0129 13:23:20.203712 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9cf6a55_fb13_46ac_b0e6_fccf7aeee0e7.slice/crio-4ffdde9ebeeedbdba696a5f325af304f4f0defbbdcd9455c30fb05e8f621e49e WatchSource:0}: Error finding container 4ffdde9ebeeedbdba696a5f325af304f4f0defbbdcd9455c30fb05e8f621e49e: Status 404 returned error can't find the container with id 4ffdde9ebeeedbdba696a5f325af304f4f0defbbdcd9455c30fb05e8f621e49e
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.479822 4787 generic.go:334] "Generic (PLEG): container finished" podID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerID="cbb19a2bd8809579d61db7fce3fbd04990a2652440cc2f7a735a7a5300f9c3f4" exitCode=0
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.479909 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pww2l" event={"ID":"95ac4597-f6a6-4a47-8892-d5b556c3363e","Type":"ContainerDied","Data":"cbb19a2bd8809579d61db7fce3fbd04990a2652440cc2f7a735a7a5300f9c3f4"}
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.483492 4787 generic.go:334] "Generic (PLEG): container finished" podID="60be26cc-9957-4401-85dd-7572bb78975f" containerID="70872f43e234762504fa14291d7bc5bd220dcd2843936b11aee5e0958022b4ce" exitCode=0
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.483540 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6slf" event={"ID":"60be26cc-9957-4401-85dd-7572bb78975f","Type":"ContainerDied","Data":"70872f43e234762504fa14291d7bc5bd220dcd2843936b11aee5e0958022b4ce"}
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.487515 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kvljk" event={"ID":"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7","Type":"ContainerStarted","Data":"59f3b944b067792427001be26eb763a2a68a86b6739fb3dc5fb63303e755ae53"}
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.487579 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-kvljk" event={"ID":"b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7","Type":"ContainerStarted","Data":"4ffdde9ebeeedbdba696a5f325af304f4f0defbbdcd9455c30fb05e8f621e49e"}
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.489504 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-kvljk"
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.495188 4787 generic.go:334] "Generic (PLEG): container finished" podID="ceef637f-9cff-4fce-95d5-7174181e363d" containerID="c17994ae0e03bf9a5bcc584e40d456b280539099db9d99b70cfe2fb8b35aa5dd" exitCode=0
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.495279 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" event={"ID":"ceef637f-9cff-4fce-95d5-7174181e363d","Type":"ContainerDied","Data":"c17994ae0e03bf9a5bcc584e40d456b280539099db9d99b70cfe2fb8b35aa5dd"}
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.495526 4787 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-kvljk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.70:8080/healthz\": dial tcp 10.217.0.70:8080: connect: connection refused" start-of-body=
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.495586 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-kvljk" podUID="b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.70:8080/healthz\": dial tcp 10.217.0.70:8080: connect: connection refused"
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.501353 4787 generic.go:334] "Generic (PLEG): container finished" podID="d376a31e-47be-4275-a440-5a961fb875d3" containerID="73dcb7c76033f7513ba7114a5cd026fdcb64f5090efa355c843a65a2be7d67d9" exitCode=0
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.501437 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87tmc" event={"ID":"d376a31e-47be-4275-a440-5a961fb875d3","Type":"ContainerDied","Data":"73dcb7c76033f7513ba7114a5cd026fdcb64f5090efa355c843a65a2be7d67d9"}
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.504437 4787 generic.go:334] "Generic (PLEG): container finished" podID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerID="29e282650465855593933e61ba2cb1dc7b8fb2cfab016f1ce5e3e6347bd65b64" exitCode=0
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.504548 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n44gv" event={"ID":"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b","Type":"ContainerDied","Data":"29e282650465855593933e61ba2cb1dc7b8fb2cfab016f1ce5e3e6347bd65b64"}
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.511737 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-kvljk" podStartSLOduration=1.51171703 podStartE2EDuration="1.51171703s" podCreationTimestamp="2026-01-29 13:23:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:23:20.509319089 +0000 UTC m=+439.270579395" watchObservedRunningTime="2026-01-29 13:23:20.51171703 +0000 UTC m=+439.272977306"
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.616334 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n44gv"
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.728483 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-87tmc"
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.805983 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-catalog-content\") pod \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") "
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.806253 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-utilities\") pod \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") "
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.806288 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsprw\" (UniqueName: \"kubernetes.io/projected/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-kube-api-access-xsprw\") pod \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\" (UID: \"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b\") "
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.808964 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-utilities" (OuterVolumeSpecName: "utilities") pod "4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" (UID: "4f791725-08e7-42f5-b0ee-cd67dfc1fc1b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.815346 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-kube-api-access-xsprw" (OuterVolumeSpecName: "kube-api-access-xsprw") pod "4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" (UID: "4f791725-08e7-42f5-b0ee-cd67dfc1fc1b"). InnerVolumeSpecName "kube-api-access-xsprw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.836481 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" (UID: "4f791725-08e7-42f5-b0ee-cd67dfc1fc1b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.907837 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-catalog-content\") pod \"d376a31e-47be-4275-a440-5a961fb875d3\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") "
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.907987 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kbxs\" (UniqueName: \"kubernetes.io/projected/d376a31e-47be-4275-a440-5a961fb875d3-kube-api-access-5kbxs\") pod \"d376a31e-47be-4275-a440-5a961fb875d3\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") "
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.908030 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-utilities\") pod \"d376a31e-47be-4275-a440-5a961fb875d3\" (UID: \"d376a31e-47be-4275-a440-5a961fb875d3\") "
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.908382 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.908403 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.908416 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsprw\" (UniqueName: \"kubernetes.io/projected/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b-kube-api-access-xsprw\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.909354 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-utilities" (OuterVolumeSpecName: "utilities") pod "d376a31e-47be-4275-a440-5a961fb875d3" (UID: "d376a31e-47be-4275-a440-5a961fb875d3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:23:20 crc kubenswrapper[4787]: I0129 13:23:20.913109 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d376a31e-47be-4275-a440-5a961fb875d3-kube-api-access-5kbxs" (OuterVolumeSpecName: "kube-api-access-5kbxs") pod "d376a31e-47be-4275-a440-5a961fb875d3" (UID: "d376a31e-47be-4275-a440-5a961fb875d3"). InnerVolumeSpecName "kube-api-access-5kbxs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.004785 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z6slf"
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.009591 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.009632 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kbxs\" (UniqueName: \"kubernetes.io/projected/d376a31e-47be-4275-a440-5a961fb875d3-kube-api-access-5kbxs\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.021116 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pww2l"
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.027553 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d376a31e-47be-4275-a440-5a961fb875d3" (UID: "d376a31e-47be-4275-a440-5a961fb875d3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.039934 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw"
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.110969 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnl7g\" (UniqueName: \"kubernetes.io/projected/60be26cc-9957-4401-85dd-7572bb78975f-kube-api-access-fnl7g\") pod \"60be26cc-9957-4401-85dd-7572bb78975f\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.111027 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-catalog-content\") pod \"60be26cc-9957-4401-85dd-7572bb78975f\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.111062 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-utilities\") pod \"60be26cc-9957-4401-85dd-7572bb78975f\" (UID: \"60be26cc-9957-4401-85dd-7572bb78975f\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.111502 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d376a31e-47be-4275-a440-5a961fb875d3-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.114549 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-utilities" (OuterVolumeSpecName: "utilities") pod "60be26cc-9957-4401-85dd-7572bb78975f" (UID: "60be26cc-9957-4401-85dd-7572bb78975f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.115486 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60be26cc-9957-4401-85dd-7572bb78975f-kube-api-access-fnl7g" (OuterVolumeSpecName: "kube-api-access-fnl7g") pod "60be26cc-9957-4401-85dd-7572bb78975f" (UID: "60be26cc-9957-4401-85dd-7572bb78975f"). InnerVolumeSpecName "kube-api-access-fnl7g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.171125 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "60be26cc-9957-4401-85dd-7572bb78975f" (UID: "60be26cc-9957-4401-85dd-7572bb78975f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212080 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2m2tn\" (UniqueName: \"kubernetes.io/projected/95ac4597-f6a6-4a47-8892-d5b556c3363e-kube-api-access-2m2tn\") pod \"95ac4597-f6a6-4a47-8892-d5b556c3363e\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212209 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-trusted-ca\") pod \"ceef637f-9cff-4fce-95d5-7174181e363d\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212266 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-operator-metrics\") pod \"ceef637f-9cff-4fce-95d5-7174181e363d\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212336 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-utilities\") pod \"95ac4597-f6a6-4a47-8892-d5b556c3363e\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212364 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmbx5\" (UniqueName: \"kubernetes.io/projected/ceef637f-9cff-4fce-95d5-7174181e363d-kube-api-access-tmbx5\") pod \"ceef637f-9cff-4fce-95d5-7174181e363d\" (UID: \"ceef637f-9cff-4fce-95d5-7174181e363d\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212388 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-catalog-content\") pod \"95ac4597-f6a6-4a47-8892-d5b556c3363e\" (UID: \"95ac4597-f6a6-4a47-8892-d5b556c3363e\") "
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212693 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212719 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnl7g\" (UniqueName: \"kubernetes.io/projected/60be26cc-9957-4401-85dd-7572bb78975f-kube-api-access-fnl7g\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.212735 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60be26cc-9957-4401-85dd-7572bb78975f-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.213122 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "ceef637f-9cff-4fce-95d5-7174181e363d" (UID: "ceef637f-9cff-4fce-95d5-7174181e363d"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.213957 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-utilities" (OuterVolumeSpecName: "utilities") pod "95ac4597-f6a6-4a47-8892-d5b556c3363e" (UID: "95ac4597-f6a6-4a47-8892-d5b556c3363e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.215120 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95ac4597-f6a6-4a47-8892-d5b556c3363e-kube-api-access-2m2tn" (OuterVolumeSpecName: "kube-api-access-2m2tn") pod "95ac4597-f6a6-4a47-8892-d5b556c3363e" (UID: "95ac4597-f6a6-4a47-8892-d5b556c3363e"). InnerVolumeSpecName "kube-api-access-2m2tn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.215293 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "ceef637f-9cff-4fce-95d5-7174181e363d" (UID: "ceef637f-9cff-4fce-95d5-7174181e363d"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.216781 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ceef637f-9cff-4fce-95d5-7174181e363d-kube-api-access-tmbx5" (OuterVolumeSpecName: "kube-api-access-tmbx5") pod "ceef637f-9cff-4fce-95d5-7174181e363d" (UID: "ceef637f-9cff-4fce-95d5-7174181e363d"). InnerVolumeSpecName "kube-api-access-tmbx5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.275814 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "95ac4597-f6a6-4a47-8892-d5b556c3363e" (UID: "95ac4597-f6a6-4a47-8892-d5b556c3363e"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.313821 4787 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.313878 4787 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ceef637f-9cff-4fce-95d5-7174181e363d-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.313911 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.313922 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmbx5\" (UniqueName: \"kubernetes.io/projected/ceef637f-9cff-4fce-95d5-7174181e363d-kube-api-access-tmbx5\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.313933 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95ac4597-f6a6-4a47-8892-d5b556c3363e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.313941 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2m2tn\" (UniqueName: \"kubernetes.io/projected/95ac4597-f6a6-4a47-8892-d5b556c3363e-kube-api-access-2m2tn\") on node \"crc\" DevicePath \"\"" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.512174 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" event={"ID":"ceef637f-9cff-4fce-95d5-7174181e363d","Type":"ContainerDied","Data":"dec56c7819c1fd6d01c98f5221532816600b0b67355a64bfc5156fddc0646bb9"} Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.512209 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fxcnw" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.512259 4787 scope.go:117] "RemoveContainer" containerID="c17994ae0e03bf9a5bcc584e40d456b280539099db9d99b70cfe2fb8b35aa5dd" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.518504 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87tmc" event={"ID":"d376a31e-47be-4275-a440-5a961fb875d3","Type":"ContainerDied","Data":"a7491a4c10a12ece046afd1c3bc58217a1393d9558ca9fa89a23e128f664379b"} Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.518547 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-87tmc" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.524326 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n44gv" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.524367 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n44gv" event={"ID":"4f791725-08e7-42f5-b0ee-cd67dfc1fc1b","Type":"ContainerDied","Data":"b708c0db648ee72703c34a208925026c96070a45c038227be94b2a2357f72052"} Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.529423 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pww2l" event={"ID":"95ac4597-f6a6-4a47-8892-d5b556c3363e","Type":"ContainerDied","Data":"81111c33c9d9d93651fafb4e656625efef22a664f9bf523ebfba9d78f968a223"} Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.529687 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pww2l" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.535235 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z6slf" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.536841 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z6slf" event={"ID":"60be26cc-9957-4401-85dd-7572bb78975f","Type":"ContainerDied","Data":"007a2f606eab62df25077c8960ce65ec89fb1165ad16e760eee8c6d0fe80346d"} Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.540280 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-kvljk" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.543521 4787 scope.go:117] "RemoveContainer" containerID="73dcb7c76033f7513ba7114a5cd026fdcb64f5090efa355c843a65a2be7d67d9" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.561669 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fxcnw"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.562182 4787 scope.go:117] "RemoveContainer" containerID="66ec05fbf8058dee6adf21bdd1b253573a3dfd9d9c7cb5d847a6b932dcb83adf" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.565116 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fxcnw"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.604038 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pww2l"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.612089 4787 scope.go:117] "RemoveContainer" containerID="67da766086ebad503cef99a95012903e7b8add2d9a3444ac4396fc56b2a0e06a" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.613036 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pww2l"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.619189 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-87tmc"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.630969 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-87tmc"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.631362 4787 scope.go:117] "RemoveContainer" containerID="29e282650465855593933e61ba2cb1dc7b8fb2cfab016f1ce5e3e6347bd65b64" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.638627 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-z6slf"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.654300 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z6slf"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.658498 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n44gv"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.661185 4787 scope.go:117] "RemoveContainer" containerID="7ce8cf61b259780a2db99500dfd133f86ed831cc4d2091572b57c52b88af1268" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.661487 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n44gv"] Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.676281 4787 scope.go:117] "RemoveContainer" containerID="9948c0737e301ff7f0cf74c7324a936e7ac5be3e503b103d06bf9e60af0a059c" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.690385 4787 scope.go:117] "RemoveContainer" containerID="cbb19a2bd8809579d61db7fce3fbd04990a2652440cc2f7a735a7a5300f9c3f4" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.707871 4787 scope.go:117] "RemoveContainer" containerID="c8b8bfb5c8dec79986322d62ed54e8949a3788c19636f158c523e0030b7e765c" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.724605 4787 scope.go:117] "RemoveContainer" containerID="992db4ba1869a0e959a871fd9e722f0037dacb6fcb8a59531ab749b8dc522153" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.739717 4787 scope.go:117] "RemoveContainer" containerID="70872f43e234762504fa14291d7bc5bd220dcd2843936b11aee5e0958022b4ce" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.778370 4787 scope.go:117] "RemoveContainer" containerID="f5e50636ebcbeb34640eebd648a6745b756f16711d3f83fbfe4a49d63175212b" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.815962 4787 scope.go:117] "RemoveContainer" containerID="5cfeebd696c1b311d561af7fe0c7b617b8ef52aa403aa1e6bf8d0041511fb560" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.993315 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" path="/var/lib/kubelet/pods/4f791725-08e7-42f5-b0ee-cd67dfc1fc1b/volumes" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.994353 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60be26cc-9957-4401-85dd-7572bb78975f" path="/var/lib/kubelet/pods/60be26cc-9957-4401-85dd-7572bb78975f/volumes" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.995163 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" path="/var/lib/kubelet/pods/95ac4597-f6a6-4a47-8892-d5b556c3363e/volumes" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.996509 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ceef637f-9cff-4fce-95d5-7174181e363d" path="/var/lib/kubelet/pods/ceef637f-9cff-4fce-95d5-7174181e363d/volumes" Jan 29 13:23:21 crc kubenswrapper[4787]: I0129 13:23:21.997044 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d376a31e-47be-4275-a440-5a961fb875d3" path="/var/lib/kubelet/pods/d376a31e-47be-4275-a440-5a961fb875d3/volumes" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.571381 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-42l7p"] Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.571673 4787 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d376a31e-47be-4275-a440-5a961fb875d3" containerName="extract-utilities" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.571687 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d376a31e-47be-4275-a440-5a961fb875d3" containerName="extract-utilities" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.571698 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.571704 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.571714 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60be26cc-9957-4401-85dd-7572bb78975f" containerName="extract-utilities" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.571722 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="60be26cc-9957-4401-85dd-7572bb78975f" containerName="extract-utilities" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.571731 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d376a31e-47be-4275-a440-5a961fb875d3" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.571737 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d376a31e-47be-4275-a440-5a961fb875d3" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.571747 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerName="extract-content" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.571753 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerName="extract-content" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.571762 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.571825 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.572023 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ceef637f-9cff-4fce-95d5-7174181e363d" containerName="marketplace-operator" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572029 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ceef637f-9cff-4fce-95d5-7174181e363d" containerName="marketplace-operator" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.572042 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerName="extract-utilities" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572050 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerName="extract-utilities" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.572060 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60be26cc-9957-4401-85dd-7572bb78975f" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572071 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="60be26cc-9957-4401-85dd-7572bb78975f" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.572080 4787 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerName="extract-content" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572088 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerName="extract-content" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.572098 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerName="extract-utilities" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572107 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerName="extract-utilities" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.572120 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60be26cc-9957-4401-85dd-7572bb78975f" containerName="extract-content" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572129 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="60be26cc-9957-4401-85dd-7572bb78975f" containerName="extract-content" Jan 29 13:23:23 crc kubenswrapper[4787]: E0129 13:23:23.572139 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d376a31e-47be-4275-a440-5a961fb875d3" containerName="extract-content" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572145 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d376a31e-47be-4275-a440-5a961fb875d3" containerName="extract-content" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572257 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="60be26cc-9957-4401-85dd-7572bb78975f" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572271 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d376a31e-47be-4275-a440-5a961fb875d3" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572282 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f791725-08e7-42f5-b0ee-cd67dfc1fc1b" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572299 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ceef637f-9cff-4fce-95d5-7174181e363d" containerName="marketplace-operator" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.572310 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="95ac4597-f6a6-4a47-8892-d5b556c3363e" containerName="registry-server" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.573617 4787 util.go:30] "No sandbox for pod can be found. 
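Before admitting the first replacement pod, the cpu_manager, state_mem, and memory_manager entries above drop every per-container resource assignment left behind by the deleted pods, keyed by podUID and containerName. A hedged toy model of that cleanup in Go follows; the map layout and names are illustrative, not the CPU manager's actual state types.

package main

import "fmt"

// assignments is a toy stand-in for per-pod, per-container resource
// state; kubelet keys its real state the same way, by podUID and
// containerName, but the value type here is invented for the sketch.
type assignments map[string]map[string]string

// removeStaleState drops every assignment belonging to pods that no
// longer exist, echoing the "RemoveStaleState: removing container" /
// "Deleted CPUSet assignment" pairs in the entries above.
func (a assignments) removeStaleState(active map[string]bool) {
	for podUID, containers := range a {
		if active[podUID] {
			continue
		}
		for containerName := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, containerName)
			delete(containers, containerName)
		}
		delete(a, podUID)
	}
}

func main() {
	state := assignments{
		"d376a31e-47be-4275-a440-5a961fb875d3": {"extract-utilities": "0-1", "registry-server": "2-3"},
	}
	state.removeStaleState(map[string]bool{}) // none of the old pods remain active
}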
Need to start a new one" pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.579910 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.585186 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-42l7p"] Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.746073 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ca49f9d-e677-4820-af3d-92b6b1233cc6-catalog-content\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.746135 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ca49f9d-e677-4820-af3d-92b6b1233cc6-utilities\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.746326 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kv7qb\" (UniqueName: \"kubernetes.io/projected/3ca49f9d-e677-4820-af3d-92b6b1233cc6-kube-api-access-kv7qb\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.769556 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vhgdz"] Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.770672 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.772690 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.784285 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vhgdz"] Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.847967 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ca49f9d-e677-4820-af3d-92b6b1233cc6-catalog-content\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.848473 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ca49f9d-e677-4820-af3d-92b6b1233cc6-utilities\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.848517 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kv7qb\" (UniqueName: \"kubernetes.io/projected/3ca49f9d-e677-4820-af3d-92b6b1233cc6-kube-api-access-kv7qb\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.848573 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ca49f9d-e677-4820-af3d-92b6b1233cc6-catalog-content\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.848931 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ca49f9d-e677-4820-af3d-92b6b1233cc6-utilities\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.875738 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kv7qb\" (UniqueName: \"kubernetes.io/projected/3ca49f9d-e677-4820-af3d-92b6b1233cc6-kube-api-access-kv7qb\") pod \"certified-operators-42l7p\" (UID: \"3ca49f9d-e677-4820-af3d-92b6b1233cc6\") " pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.899686 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.950024 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50557508-a648-4ea9-982d-fe0cf3f70b3c-utilities\") pod \"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.950203 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqmvc\" (UniqueName: \"kubernetes.io/projected/50557508-a648-4ea9-982d-fe0cf3f70b3c-kube-api-access-rqmvc\") pod \"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:23 crc kubenswrapper[4787]: I0129 13:23:23.950336 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50557508-a648-4ea9-982d-fe0cf3f70b3c-catalog-content\") pod \"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.051498 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50557508-a648-4ea9-982d-fe0cf3f70b3c-catalog-content\") pod \"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.052009 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50557508-a648-4ea9-982d-fe0cf3f70b3c-utilities\") pod \"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.052047 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqmvc\" (UniqueName: \"kubernetes.io/projected/50557508-a648-4ea9-982d-fe0cf3f70b3c-kube-api-access-rqmvc\") pod \"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.053504 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50557508-a648-4ea9-982d-fe0cf3f70b3c-utilities\") pod \"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.053532 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50557508-a648-4ea9-982d-fe0cf3f70b3c-catalog-content\") pod \"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.074128 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqmvc\" (UniqueName: \"kubernetes.io/projected/50557508-a648-4ea9-982d-fe0cf3f70b3c-kube-api-access-rqmvc\") pod 
\"community-operators-vhgdz\" (UID: \"50557508-a648-4ea9-982d-fe0cf3f70b3c\") " pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.090280 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.313114 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-42l7p"] Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.511772 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vhgdz"] Jan 29 13:23:24 crc kubenswrapper[4787]: W0129 13:23:24.516298 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50557508_a648_4ea9_982d_fe0cf3f70b3c.slice/crio-605b85a9be7b71627132c0f42d1ad5217c3964be772912ccc7c7265deb9719a8 WatchSource:0}: Error finding container 605b85a9be7b71627132c0f42d1ad5217c3964be772912ccc7c7265deb9719a8: Status 404 returned error can't find the container with id 605b85a9be7b71627132c0f42d1ad5217c3964be772912ccc7c7265deb9719a8 Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.563904 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vhgdz" event={"ID":"50557508-a648-4ea9-982d-fe0cf3f70b3c","Type":"ContainerStarted","Data":"605b85a9be7b71627132c0f42d1ad5217c3964be772912ccc7c7265deb9719a8"} Jan 29 13:23:24 crc kubenswrapper[4787]: I0129 13:23:24.565087 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42l7p" event={"ID":"3ca49f9d-e677-4820-af3d-92b6b1233cc6","Type":"ContainerStarted","Data":"dfd759b85601ed418f408bbd71406e1356ce8672fb188c69db84bd6e4a25fbbf"} Jan 29 13:23:25 crc kubenswrapper[4787]: I0129 13:23:25.582430 4787 generic.go:334] "Generic (PLEG): container finished" podID="3ca49f9d-e677-4820-af3d-92b6b1233cc6" containerID="b1a4bcf68ed5d4d7b226a2d8d3d0d6a63d69b0a051d68fed91f2a519bcfb60f3" exitCode=0 Jan 29 13:23:25 crc kubenswrapper[4787]: I0129 13:23:25.582626 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42l7p" event={"ID":"3ca49f9d-e677-4820-af3d-92b6b1233cc6","Type":"ContainerDied","Data":"b1a4bcf68ed5d4d7b226a2d8d3d0d6a63d69b0a051d68fed91f2a519bcfb60f3"} Jan 29 13:23:25 crc kubenswrapper[4787]: I0129 13:23:25.588342 4787 generic.go:334] "Generic (PLEG): container finished" podID="50557508-a648-4ea9-982d-fe0cf3f70b3c" containerID="a46326b03ab384afd5c1b70ee2fc52cc6328a83e49e5567b82c5c9f459831767" exitCode=0 Jan 29 13:23:25 crc kubenswrapper[4787]: I0129 13:23:25.588418 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vhgdz" event={"ID":"50557508-a648-4ea9-982d-fe0cf3f70b3c","Type":"ContainerDied","Data":"a46326b03ab384afd5c1b70ee2fc52cc6328a83e49e5567b82c5c9f459831767"} Jan 29 13:23:25 crc kubenswrapper[4787]: I0129 13:23:25.972666 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vjqjt"] Jan 29 13:23:25 crc kubenswrapper[4787]: I0129 13:23:25.976313 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:25 crc kubenswrapper[4787]: I0129 13:23:25.980432 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 29 13:23:25 crc kubenswrapper[4787]: I0129 13:23:25.996939 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vjqjt"] Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.079829 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq2rz\" (UniqueName: \"kubernetes.io/projected/ab1da11b-1f91-4dbe-8dc5-04b11596596b-kube-api-access-kq2rz\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.080279 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab1da11b-1f91-4dbe-8dc5-04b11596596b-utilities\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.080535 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab1da11b-1f91-4dbe-8dc5-04b11596596b-catalog-content\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.174658 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6s2hl"] Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.176708 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.179544 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.180059 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6s2hl"] Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.181481 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq2rz\" (UniqueName: \"kubernetes.io/projected/ab1da11b-1f91-4dbe-8dc5-04b11596596b-kube-api-access-kq2rz\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.181550 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab1da11b-1f91-4dbe-8dc5-04b11596596b-utilities\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.181690 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab1da11b-1f91-4dbe-8dc5-04b11596596b-catalog-content\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.182720 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab1da11b-1f91-4dbe-8dc5-04b11596596b-catalog-content\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.182956 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab1da11b-1f91-4dbe-8dc5-04b11596596b-utilities\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.230360 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq2rz\" (UniqueName: \"kubernetes.io/projected/ab1da11b-1f91-4dbe-8dc5-04b11596596b-kube-api-access-kq2rz\") pod \"redhat-marketplace-vjqjt\" (UID: \"ab1da11b-1f91-4dbe-8dc5-04b11596596b\") " pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.283672 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfgrv\" (UniqueName: \"kubernetes.io/projected/8f20d430-2c3b-4ccf-888e-430a6bbf9979-kube-api-access-cfgrv\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.283756 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f20d430-2c3b-4ccf-888e-430a6bbf9979-utilities\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " 
pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.283803 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f20d430-2c3b-4ccf-888e-430a6bbf9979-catalog-content\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.338181 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.384629 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f20d430-2c3b-4ccf-888e-430a6bbf9979-utilities\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.384690 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f20d430-2c3b-4ccf-888e-430a6bbf9979-catalog-content\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.384757 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfgrv\" (UniqueName: \"kubernetes.io/projected/8f20d430-2c3b-4ccf-888e-430a6bbf9979-kube-api-access-cfgrv\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.385245 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f20d430-2c3b-4ccf-888e-430a6bbf9979-utilities\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.385307 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f20d430-2c3b-4ccf-888e-430a6bbf9979-catalog-content\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.408573 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfgrv\" (UniqueName: \"kubernetes.io/projected/8f20d430-2c3b-4ccf-888e-430a6bbf9979-kube-api-access-cfgrv\") pod \"redhat-operators-6s2hl\" (UID: \"8f20d430-2c3b-4ccf-888e-430a6bbf9979\") " pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.499361 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.759944 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vjqjt"] Jan 29 13:23:26 crc kubenswrapper[4787]: W0129 13:23:26.765961 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab1da11b_1f91_4dbe_8dc5_04b11596596b.slice/crio-18f48fa6edfff4d2cb137818a642c541f32800fb968d5b7ff4d199dbba58ca96 WatchSource:0}: Error finding container 18f48fa6edfff4d2cb137818a642c541f32800fb968d5b7ff4d199dbba58ca96: Status 404 returned error can't find the container with id 18f48fa6edfff4d2cb137818a642c541f32800fb968d5b7ff4d199dbba58ca96 Jan 29 13:23:26 crc kubenswrapper[4787]: W0129 13:23:26.910860 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f20d430_2c3b_4ccf_888e_430a6bbf9979.slice/crio-e6ce5b4d918393b134032817c91805222d183d59e4d1a046fa9d560dfb348f77 WatchSource:0}: Error finding container e6ce5b4d918393b134032817c91805222d183d59e4d1a046fa9d560dfb348f77: Status 404 returned error can't find the container with id e6ce5b4d918393b134032817c91805222d183d59e4d1a046fa9d560dfb348f77 Jan 29 13:23:26 crc kubenswrapper[4787]: I0129 13:23:26.912240 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6s2hl"] Jan 29 13:23:27 crc kubenswrapper[4787]: I0129 13:23:27.604326 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6s2hl" event={"ID":"8f20d430-2c3b-4ccf-888e-430a6bbf9979","Type":"ContainerStarted","Data":"e6ce5b4d918393b134032817c91805222d183d59e4d1a046fa9d560dfb348f77"} Jan 29 13:23:27 crc kubenswrapper[4787]: I0129 13:23:27.605723 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vjqjt" event={"ID":"ab1da11b-1f91-4dbe-8dc5-04b11596596b","Type":"ContainerStarted","Data":"18f48fa6edfff4d2cb137818a642c541f32800fb968d5b7ff4d199dbba58ca96"} Jan 29 13:23:28 crc kubenswrapper[4787]: I0129 13:23:28.614474 4787 generic.go:334] "Generic (PLEG): container finished" podID="ab1da11b-1f91-4dbe-8dc5-04b11596596b" containerID="f485ff3ae6b3341e3d4fedee4256cffcb2c54929530fe341cf705d467ff41727" exitCode=0 Jan 29 13:23:28 crc kubenswrapper[4787]: I0129 13:23:28.614570 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vjqjt" event={"ID":"ab1da11b-1f91-4dbe-8dc5-04b11596596b","Type":"ContainerDied","Data":"f485ff3ae6b3341e3d4fedee4256cffcb2c54929530fe341cf705d467ff41727"} Jan 29 13:23:28 crc kubenswrapper[4787]: I0129 13:23:28.617676 4787 generic.go:334] "Generic (PLEG): container finished" podID="8f20d430-2c3b-4ccf-888e-430a6bbf9979" containerID="d64431f91548de42307d2e4d3a3abe306c603c1cf390dc4785eefdff218cbb6e" exitCode=0 Jan 29 13:23:28 crc kubenswrapper[4787]: I0129 13:23:28.617820 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6s2hl" event={"ID":"8f20d430-2c3b-4ccf-888e-430a6bbf9979","Type":"ContainerDied","Data":"d64431f91548de42307d2e4d3a3abe306c603c1cf390dc4785eefdff218cbb6e"} Jan 29 13:23:28 crc kubenswrapper[4787]: I0129 13:23:28.621244 4787 generic.go:334] "Generic (PLEG): container finished" podID="3ca49f9d-e677-4820-af3d-92b6b1233cc6" containerID="2ed75fad85306a302c6fa48f81f2a1169c7dc5783fb7e7f8aa2a101fd6da3fbf" exitCode=0 Jan 29 
Jan 29 13:23:28 crc kubenswrapper[4787]: I0129 13:23:28.621323 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42l7p" event={"ID":"3ca49f9d-e677-4820-af3d-92b6b1233cc6","Type":"ContainerDied","Data":"2ed75fad85306a302c6fa48f81f2a1169c7dc5783fb7e7f8aa2a101fd6da3fbf"}
Jan 29 13:23:28 crc kubenswrapper[4787]: I0129 13:23:28.626689 4787 generic.go:334] "Generic (PLEG): container finished" podID="50557508-a648-4ea9-982d-fe0cf3f70b3c" containerID="e0f6b8378c15810703889da9c5ac7abcb45a8ad76dd9613d19adc6985e52a336" exitCode=0
Jan 29 13:23:28 crc kubenswrapper[4787]: I0129 13:23:28.626756 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vhgdz" event={"ID":"50557508-a648-4ea9-982d-fe0cf3f70b3c","Type":"ContainerDied","Data":"e0f6b8378c15810703889da9c5ac7abcb45a8ad76dd9613d19adc6985e52a336"}
Jan 29 13:23:29 crc kubenswrapper[4787]: I0129 13:23:29.636708 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vhgdz" event={"ID":"50557508-a648-4ea9-982d-fe0cf3f70b3c","Type":"ContainerStarted","Data":"7ad04fe14b51697aa3c489e6cd945b52480f8bd337b544ab74463f14b4f6876f"}
Jan 29 13:23:29 crc kubenswrapper[4787]: I0129 13:23:29.662137 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vhgdz" podStartSLOduration=3.073694677 podStartE2EDuration="6.6621184s" podCreationTimestamp="2026-01-29 13:23:23 +0000 UTC" firstStartedPulling="2026-01-29 13:23:25.595586036 +0000 UTC m=+444.356846312" lastFinishedPulling="2026-01-29 13:23:29.184009759 +0000 UTC m=+447.945270035" observedRunningTime="2026-01-29 13:23:29.657972438 +0000 UTC m=+448.419232714" watchObservedRunningTime="2026-01-29 13:23:29.6621184 +0000 UTC m=+448.423378676"
Jan 29 13:23:30 crc kubenswrapper[4787]: I0129 13:23:30.645298 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-42l7p" event={"ID":"3ca49f9d-e677-4820-af3d-92b6b1233cc6","Type":"ContainerStarted","Data":"3b515d882911dccfe977d23c0d42005f1edb58e487d6de3011ef92217313da76"}
Jan 29 13:23:30 crc kubenswrapper[4787]: I0129 13:23:30.646961 4787 generic.go:334] "Generic (PLEG): container finished" podID="8f20d430-2c3b-4ccf-888e-430a6bbf9979" containerID="a8c548caff0817a924f96a0aa848bc1992449189754bd53597ff6ffa4109bda0" exitCode=0
Jan 29 13:23:30 crc kubenswrapper[4787]: I0129 13:23:30.647056 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6s2hl" event={"ID":"8f20d430-2c3b-4ccf-888e-430a6bbf9979","Type":"ContainerDied","Data":"a8c548caff0817a924f96a0aa848bc1992449189754bd53597ff6ffa4109bda0"}
Jan 29 13:23:30 crc kubenswrapper[4787]: I0129 13:23:30.650805 4787 generic.go:334] "Generic (PLEG): container finished" podID="ab1da11b-1f91-4dbe-8dc5-04b11596596b" containerID="f71e6dc8c04d5c75820219c2f47523fc7574ad9816136b0a25c339606f5b5434" exitCode=0
Jan 29 13:23:30 crc kubenswrapper[4787]: I0129 13:23:30.650906 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vjqjt" event={"ID":"ab1da11b-1f91-4dbe-8dc5-04b11596596b","Type":"ContainerDied","Data":"f71e6dc8c04d5c75820219c2f47523fc7574ad9816136b0a25c339606f5b5434"}
Jan 29 13:23:30 crc kubenswrapper[4787]: I0129 13:23:30.668707 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-42l7p" podStartSLOduration=3.653623563 podStartE2EDuration="7.668683384s" podCreationTimestamp="2026-01-29 13:23:23 +0000 UTC" firstStartedPulling="2026-01-29 13:23:25.587738133 +0000 UTC m=+444.348998409" lastFinishedPulling="2026-01-29 13:23:29.602797954 +0000 UTC m=+448.364058230" observedRunningTime="2026-01-29 13:23:30.667938532 +0000 UTC m=+449.429198828" watchObservedRunningTime="2026-01-29 13:23:30.668683384 +0000 UTC m=+449.429943660"
Jan 29 13:23:31 crc kubenswrapper[4787]: I0129 13:23:31.659601 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6s2hl" event={"ID":"8f20d430-2c3b-4ccf-888e-430a6bbf9979","Type":"ContainerStarted","Data":"b4ae376360d4b91ebb19372c46466fa2a18a6d810d8d28be132627aa6d4ae1f9"}
Jan 29 13:23:31 crc kubenswrapper[4787]: I0129 13:23:31.665051 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vjqjt" event={"ID":"ab1da11b-1f91-4dbe-8dc5-04b11596596b","Type":"ContainerStarted","Data":"5e94c3ba076881caae5cf89f69cadc75f8d0eb13fca1c7cb8c19ef674d85ad8a"}
Jan 29 13:23:31 crc kubenswrapper[4787]: I0129 13:23:31.708874 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vjqjt" podStartSLOduration=4.1611415019999995 podStartE2EDuration="6.708855353s" podCreationTimestamp="2026-01-29 13:23:25 +0000 UTC" firstStartedPulling="2026-01-29 13:23:28.617284694 +0000 UTC m=+447.378544970" lastFinishedPulling="2026-01-29 13:23:31.164998545 +0000 UTC m=+449.926258821" observedRunningTime="2026-01-29 13:23:31.705533595 +0000 UTC m=+450.466793871" watchObservedRunningTime="2026-01-29 13:23:31.708855353 +0000 UTC m=+450.470115619"
Jan 29 13:23:31 crc kubenswrapper[4787]: I0129 13:23:31.709748 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6s2hl" podStartSLOduration=3.169084138 podStartE2EDuration="5.70974221s" podCreationTimestamp="2026-01-29 13:23:26 +0000 UTC" firstStartedPulling="2026-01-29 13:23:28.619470139 +0000 UTC m=+447.380730415" lastFinishedPulling="2026-01-29 13:23:31.160128211 +0000 UTC m=+449.921388487" observedRunningTime="2026-01-29 13:23:31.687647366 +0000 UTC m=+450.448907642" watchObservedRunningTime="2026-01-29 13:23:31.70974221 +0000 UTC m=+450.471002486"
Jan 29 13:23:33 crc kubenswrapper[4787]: I0129 13:23:33.899921 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-42l7p"
Jan 29 13:23:33 crc kubenswrapper[4787]: I0129 13:23:33.902709 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-42l7p"
Jan 29 13:23:33 crc kubenswrapper[4787]: I0129 13:23:33.949029 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-42l7p"
Jan 29 13:23:34 crc kubenswrapper[4787]: I0129 13:23:34.091031 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vhgdz"
Jan 29 13:23:34 crc kubenswrapper[4787]: I0129 13:23:34.091117 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vhgdz"
Jan 29 13:23:34 crc kubenswrapper[4787]: I0129 13:23:34.139780 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vhgdz"
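The four pod_startup_latency_tracker.go:104 entries above each carry two durations, and in every case podStartSLOduration equals podStartE2EDuration minus the image-pull window (lastFinishedPulling - firstStartedPulling); for community-operators-vhgdz, 6.6621184s - 3.588423723s = 3.073694677s. The Go sketch below reproduces that arithmetic from the logged timestamps; the relationship is inferred from these entries rather than quoted from kubelet source.

package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		// layout matches the log's timestamp format, with an optional fraction
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}

	// timestamps copied from the community-operators-vhgdz entry above
	created := parse("2026-01-29 13:23:23 +0000 UTC")
	firstStartedPulling := parse("2026-01-29 13:23:25.595586036 +0000 UTC")
	lastFinishedPulling := parse("2026-01-29 13:23:29.184009759 +0000 UTC")
	watchObservedRunning := parse("2026-01-29 13:23:29.6621184 +0000 UTC")

	e2e := watchObservedRunning.Sub(created)                  // 6.6621184s
	slo := e2e - lastFinishedPulling.Sub(firstStartedPulling) // 3.073694677s
	fmt.Println(e2e, slo)
}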
(probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vhgdz" Jan 29 13:23:34 crc kubenswrapper[4787]: I0129 13:23:34.728754 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-42l7p" Jan 29 13:23:36 crc kubenswrapper[4787]: I0129 13:23:36.338601 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:36 crc kubenswrapper[4787]: I0129 13:23:36.339033 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:36 crc kubenswrapper[4787]: I0129 13:23:36.400859 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:36 crc kubenswrapper[4787]: I0129 13:23:36.499769 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:36 crc kubenswrapper[4787]: I0129 13:23:36.499868 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:36 crc kubenswrapper[4787]: I0129 13:23:36.744696 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vjqjt" Jan 29 13:23:37 crc kubenswrapper[4787]: I0129 13:23:37.543953 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6s2hl" podUID="8f20d430-2c3b-4ccf-888e-430a6bbf9979" containerName="registry-server" probeResult="failure" output=< Jan 29 13:23:37 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s Jan 29 13:23:37 crc kubenswrapper[4787]: > Jan 29 13:23:46 crc kubenswrapper[4787]: I0129 13:23:46.549304 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:23:46 crc kubenswrapper[4787]: I0129 13:23:46.592012 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6s2hl" Jan 29 13:24:58 crc kubenswrapper[4787]: I0129 13:24:58.395151 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:24:58 crc kubenswrapper[4787]: I0129 13:24:58.395819 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:25:28 crc kubenswrapper[4787]: I0129 13:25:28.395178 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:25:28 crc kubenswrapper[4787]: I0129 13:25:28.396352 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:25:58 crc kubenswrapper[4787]: I0129 13:25:58.395950 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:25:58 crc kubenswrapper[4787]: I0129 13:25:58.397132 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:25:58 crc kubenswrapper[4787]: I0129 13:25:58.397213 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:25:58 crc kubenswrapper[4787]: I0129 13:25:58.398251 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bb5ac9dd4e381fa95aa4a3ebd35c4933b0fa395c23151c515966ae47561636f5"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:25:58 crc kubenswrapper[4787]: I0129 13:25:58.398358 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://bb5ac9dd4e381fa95aa4a3ebd35c4933b0fa395c23151c515966ae47561636f5" gracePeriod=600 Jan 29 13:25:58 crc kubenswrapper[4787]: I0129 13:25:58.653977 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="bb5ac9dd4e381fa95aa4a3ebd35c4933b0fa395c23151c515966ae47561636f5" exitCode=0 Jan 29 13:25:58 crc kubenswrapper[4787]: I0129 13:25:58.654040 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"bb5ac9dd4e381fa95aa4a3ebd35c4933b0fa395c23151c515966ae47561636f5"} Jan 29 13:25:58 crc kubenswrapper[4787]: I0129 13:25:58.654486 4787 scope.go:117] "RemoveContainer" containerID="7ba8854d1c9db1088e3ba2267b52c1d63c0e87ec784685a87fb7723189cff447" Jan 29 13:25:59 crc kubenswrapper[4787]: I0129 13:25:59.665576 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"4dd80cb5b01d45821c261b540b1bfc37859561721213e5bfb9d7026de25f0942"} Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.714729 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-ww6xr"] Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.716305 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.742703 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-ww6xr"] Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.916648 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-registry-tls\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.916709 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/381e090f-b0a4-479f-af6c-d5945f2530f8-registry-certificates\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.916736 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rwzq\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-kube-api-access-9rwzq\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.916754 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-bound-sa-token\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.917041 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/381e090f-b0a4-479f-af6c-d5945f2530f8-installation-pull-secrets\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.917132 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/381e090f-b0a4-479f-af6c-d5945f2530f8-trusted-ca\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.917193 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.917239 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/381e090f-b0a4-479f-af6c-d5945f2530f8-ca-trust-extracted\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:38 crc kubenswrapper[4787]: I0129 13:26:38.940288 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.018550 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/381e090f-b0a4-479f-af6c-d5945f2530f8-installation-pull-secrets\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.018640 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/381e090f-b0a4-479f-af6c-d5945f2530f8-trusted-ca\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.018694 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/381e090f-b0a4-479f-af6c-d5945f2530f8-ca-trust-extracted\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.018793 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-registry-tls\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.018847 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/381e090f-b0a4-479f-af6c-d5945f2530f8-registry-certificates\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.018889 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rwzq\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-kube-api-access-9rwzq\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.018922 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-bound-sa-token\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.019345 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/381e090f-b0a4-479f-af6c-d5945f2530f8-ca-trust-extracted\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.020572 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/381e090f-b0a4-479f-af6c-d5945f2530f8-trusted-ca\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.020864 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/381e090f-b0a4-479f-af6c-d5945f2530f8-registry-certificates\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.027409 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/381e090f-b0a4-479f-af6c-d5945f2530f8-installation-pull-secrets\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.030633 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-registry-tls\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.037999 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-bound-sa-token\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.043745 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rwzq\" (UniqueName: \"kubernetes.io/projected/381e090f-b0a4-479f-af6c-d5945f2530f8-kube-api-access-9rwzq\") pod \"image-registry-66df7c8f76-ww6xr\" (UID: \"381e090f-b0a4-479f-af6c-d5945f2530f8\") " pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.338229 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.676092 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-ww6xr"] Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.949654 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" event={"ID":"381e090f-b0a4-479f-af6c-d5945f2530f8","Type":"ContainerStarted","Data":"c60b281ccc03d231c6f072a861e75d774abf61520b273fae75342e850843f3c9"} Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.950357 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" event={"ID":"381e090f-b0a4-479f-af6c-d5945f2530f8","Type":"ContainerStarted","Data":"fd261e3b1a948ec5a26f88e3ff1ce8fec3ba2da473c86a1ea5b841909365fd9d"} Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.950387 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:39 crc kubenswrapper[4787]: I0129 13:26:39.975129 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" podStartSLOduration=1.9751016190000001 podStartE2EDuration="1.975101619s" podCreationTimestamp="2026-01-29 13:26:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:26:39.972377269 +0000 UTC m=+638.733637575" watchObservedRunningTime="2026-01-29 13:26:39.975101619 +0000 UTC m=+638.736361905" Jan 29 13:26:59 crc kubenswrapper[4787]: I0129 13:26:59.352734 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-ww6xr" Jan 29 13:26:59 crc kubenswrapper[4787]: I0129 13:26:59.441891 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fq9qf"] Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.493204 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" podUID="945c6d0d-6e91-4805-937d-401bd0742688" containerName="registry" containerID="cri-o://b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575" gracePeriod=30 Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.887611 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.991695 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/945c6d0d-6e91-4805-937d-401bd0742688-installation-pull-secrets\") pod \"945c6d0d-6e91-4805-937d-401bd0742688\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.991919 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"945c6d0d-6e91-4805-937d-401bd0742688\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.991967 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ld5l\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-kube-api-access-8ld5l\") pod \"945c6d0d-6e91-4805-937d-401bd0742688\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.991994 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/945c6d0d-6e91-4805-937d-401bd0742688-ca-trust-extracted\") pod \"945c6d0d-6e91-4805-937d-401bd0742688\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.992035 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-trusted-ca\") pod \"945c6d0d-6e91-4805-937d-401bd0742688\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.992075 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-registry-certificates\") pod \"945c6d0d-6e91-4805-937d-401bd0742688\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.992120 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-bound-sa-token\") pod \"945c6d0d-6e91-4805-937d-401bd0742688\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.992150 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-registry-tls\") pod \"945c6d0d-6e91-4805-937d-401bd0742688\" (UID: \"945c6d0d-6e91-4805-937d-401bd0742688\") " Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.992968 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "945c6d0d-6e91-4805-937d-401bd0742688" (UID: "945c6d0d-6e91-4805-937d-401bd0742688"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.993294 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "945c6d0d-6e91-4805-937d-401bd0742688" (UID: "945c6d0d-6e91-4805-937d-401bd0742688"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.997768 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/945c6d0d-6e91-4805-937d-401bd0742688-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "945c6d0d-6e91-4805-937d-401bd0742688" (UID: "945c6d0d-6e91-4805-937d-401bd0742688"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.998439 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "945c6d0d-6e91-4805-937d-401bd0742688" (UID: "945c6d0d-6e91-4805-937d-401bd0742688"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.998956 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "945c6d0d-6e91-4805-937d-401bd0742688" (UID: "945c6d0d-6e91-4805-937d-401bd0742688"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:27:24 crc kubenswrapper[4787]: I0129 13:27:24.999535 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-kube-api-access-8ld5l" (OuterVolumeSpecName: "kube-api-access-8ld5l") pod "945c6d0d-6e91-4805-937d-401bd0742688" (UID: "945c6d0d-6e91-4805-937d-401bd0742688"). InnerVolumeSpecName "kube-api-access-8ld5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.016786 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/945c6d0d-6e91-4805-937d-401bd0742688-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "945c6d0d-6e91-4805-937d-401bd0742688" (UID: "945c6d0d-6e91-4805-937d-401bd0742688"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.019583 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "945c6d0d-6e91-4805-937d-401bd0742688" (UID: "945c6d0d-6e91-4805-937d-401bd0742688"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.093395 4787 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.093447 4787 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.093501 4787 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.093514 4787 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/945c6d0d-6e91-4805-937d-401bd0742688-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.093527 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ld5l\" (UniqueName: \"kubernetes.io/projected/945c6d0d-6e91-4805-937d-401bd0742688-kube-api-access-8ld5l\") on node \"crc\" DevicePath \"\"" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.093537 4787 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/945c6d0d-6e91-4805-937d-401bd0742688-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.093548 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/945c6d0d-6e91-4805-937d-401bd0742688-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.291018 4787 generic.go:334] "Generic (PLEG): container finished" podID="945c6d0d-6e91-4805-937d-401bd0742688" containerID="b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575" exitCode=0 Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.291513 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" event={"ID":"945c6d0d-6e91-4805-937d-401bd0742688","Type":"ContainerDied","Data":"b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575"} Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.291555 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" event={"ID":"945c6d0d-6e91-4805-937d-401bd0742688","Type":"ContainerDied","Data":"e65b4d6608ecfac496fa424c991b0e519098811b5bedb9c14711f5f019fbec17"} Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.291583 4787 scope.go:117] "RemoveContainer" containerID="b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.291792 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-fq9qf" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.314862 4787 scope.go:117] "RemoveContainer" containerID="b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575" Jan 29 13:27:25 crc kubenswrapper[4787]: E0129 13:27:25.316192 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575\": container with ID starting with b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575 not found: ID does not exist" containerID="b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.316248 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575"} err="failed to get container status \"b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575\": rpc error: code = NotFound desc = could not find container \"b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575\": container with ID starting with b95be9894702ecb2ed3ca00861f43ff3c67a3d2aa3d991943dc1a41eb7733575 not found: ID does not exist" Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.330849 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fq9qf"] Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.335216 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-fq9qf"] Jan 29 13:27:25 crc kubenswrapper[4787]: I0129 13:27:25.995611 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="945c6d0d-6e91-4805-937d-401bd0742688" path="/var/lib/kubelet/pods/945c6d0d-6e91-4805-937d-401bd0742688/volumes" Jan 29 13:27:58 crc kubenswrapper[4787]: I0129 13:27:58.394686 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:27:58 crc kubenswrapper[4787]: I0129 13:27:58.395910 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:28:28 crc kubenswrapper[4787]: I0129 13:28:28.395588 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:28:28 crc kubenswrapper[4787]: I0129 13:28:28.396483 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:28:43 crc kubenswrapper[4787]: I0129 13:28:43.369380 4787 dynamic_cafile_content.go:123] "Loaded a new CA 
Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.395099 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.396084 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.396165 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.397058 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4dd80cb5b01d45821c261b540b1bfc37859561721213e5bfb9d7026de25f0942"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.397169 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://4dd80cb5b01d45821c261b540b1bfc37859561721213e5bfb9d7026de25f0942" gracePeriod=600 Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.962782 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="4dd80cb5b01d45821c261b540b1bfc37859561721213e5bfb9d7026de25f0942" exitCode=0 Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.962841 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"4dd80cb5b01d45821c261b540b1bfc37859561721213e5bfb9d7026de25f0942"} Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.963375 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"2753a5187d40800d90e7784477132e3d3982abbaf428dff98dbd39ac66898a8b"} Jan 29 13:28:58 crc kubenswrapper[4787]: I0129 13:28:58.963403 4787 scope.go:117] "RemoveContainer" containerID="bb5ac9dd4e381fa95aa4a3ebd35c4933b0fa395c23151c515966ae47561636f5" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.502722 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-pq2mb"] Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.504337 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovn-controller" containerID="cri-o://7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7" gracePeriod=30 Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 
13:29:25.504418 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="northd" containerID="cri-o://3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7" gracePeriod=30 Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.504431 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="sbdb" containerID="cri-o://b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d" gracePeriod=30 Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.504548 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovn-acl-logging" containerID="cri-o://d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05" gracePeriod=30 Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.504548 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kube-rbac-proxy-node" containerID="cri-o://fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f" gracePeriod=30 Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.504701 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7" gracePeriod=30 Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.504672 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="nbdb" containerID="cri-o://dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1" gracePeriod=30 Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.559302 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" containerID="cri-o://05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18" gracePeriod=30 Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.864874 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/3.log" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.868868 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovn-acl-logging/0.log" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.869397 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovn-controller/0.log" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.870371 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927220 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fgk79"] Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927523 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="nbdb" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927540 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="nbdb" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927555 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927565 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927577 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovn-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927586 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovn-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927598 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="945c6d0d-6e91-4805-937d-401bd0742688" containerName="registry" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927608 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="945c6d0d-6e91-4805-937d-401bd0742688" containerName="registry" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927619 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="sbdb" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927627 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="sbdb" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927638 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kubecfg-setup" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927646 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kubecfg-setup" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927656 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kube-rbac-proxy-node" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927665 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kube-rbac-proxy-node" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927681 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="northd" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927689 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="northd" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927700 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" 
containerName="kube-rbac-proxy-ovn-metrics" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927709 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927721 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovn-acl-logging" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927729 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovn-acl-logging" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927744 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927752 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.927762 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927771 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927919 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927942 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="sbdb" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927957 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927971 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kube-rbac-proxy-ovn-metrics" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.927988 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="northd" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928006 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928018 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="kube-rbac-proxy-node" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928028 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="nbdb" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928038 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovn-acl-logging" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928047 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovn-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928056 4787 
memory_manager.go:354] "RemoveStaleState removing state" podUID="945c6d0d-6e91-4805-937d-401bd0742688" containerName="registry" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.928180 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928190 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: E0129 13:29:25.928202 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928212 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928339 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.928608 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerName="ovnkube-controller" Jan 29 13:29:25 crc kubenswrapper[4787]: I0129 13:29:25.930589 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.010876 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlqsj\" (UniqueName: \"kubernetes.io/projected/55309602-3b5c-4506-8cad-0c1609e2b1cb-kube-api-access-qlqsj\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011001 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovn-node-metrics-cert\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011052 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-systemd\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011144 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-ovn-kubernetes\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011188 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-netns\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011220 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-var-lib-cni-networks-ovn-kubernetes\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011255 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-ovn\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011290 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-openvswitch\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011325 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011345 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011376 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011493 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011330 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-config\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011591 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-log-socket\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011623 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-node-log\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011659 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-slash\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011710 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-log-socket" (OuterVolumeSpecName: "log-socket") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011733 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-node-log" (OuterVolumeSpecName: "node-log") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011801 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-kubelet\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011916 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-script-lib\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011861 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-slash" (OuterVolumeSpecName: "host-slash") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011952 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011985 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.011437 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012068 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-systemd-units\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012134 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012198 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-env-overrides\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012224 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012258 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-netd\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012300 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-etc-openvswitch\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012333 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012336 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-var-lib-openvswitch\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012403 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012372 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012472 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-bin\") pod \"55309602-3b5c-4506-8cad-0c1609e2b1cb\" (UID: \"55309602-3b5c-4506-8cad-0c1609e2b1cb\") " Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012507 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012648 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012741 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovnkube-config\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012794 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovnkube-script-lib\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012820 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5jj4\" (UniqueName: \"kubernetes.io/projected/446f0428-1c28-4b1b-b3ce-366e383f7c61-kube-api-access-s5jj4\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012856 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-systemd-units\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012893 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-systemd\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012915 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-log-socket\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012944 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-etc-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012973 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-kubelet\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.012999 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013031 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovn-node-metrics-cert\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013057 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013084 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-node-log\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013133 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-slash\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013156 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-var-lib-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013235 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-cni-bin\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013383 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-run-netns\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013477 4787 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-cni-netd\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013539 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-env-overrides\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013584 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-ovn\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013706 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-run-ovn-kubernetes\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013861 4787 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013890 4787 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013912 4787 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013931 4787 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013948 4787 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013966 4787 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013981 4787 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-node-log\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.013996 4787 
reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-log-socket\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014013 4787 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-slash\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014029 4787 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014045 4787 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014061 4787 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014077 4787 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55309602-3b5c-4506-8cad-0c1609e2b1cb-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014092 4787 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014108 4787 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014124 4787 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.014139 4787 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.020489 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.020872 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55309602-3b5c-4506-8cad-0c1609e2b1cb-kube-api-access-qlqsj" (OuterVolumeSpecName: "kube-api-access-qlqsj") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "kube-api-access-qlqsj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.035795 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "55309602-3b5c-4506-8cad-0c1609e2b1cb" (UID: "55309602-3b5c-4506-8cad-0c1609e2b1cb"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116069 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-slash\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116162 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-var-lib-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116198 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-cni-bin\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116321 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-cni-bin\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116320 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-slash\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116424 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-run-netns\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116320 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-var-lib-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116565 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-cni-netd\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc 
kubenswrapper[4787]: I0129 13:29:26.116609 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-run-netns\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116768 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-cni-netd\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116826 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-env-overrides\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116911 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-ovn\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.116978 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-run-ovn-kubernetes\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117032 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-ovn\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117034 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovnkube-config\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117085 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-run-ovn-kubernetes\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117118 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovnkube-script-lib\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117159 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-s5jj4\" (UniqueName: \"kubernetes.io/projected/446f0428-1c28-4b1b-b3ce-366e383f7c61-kube-api-access-s5jj4\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117198 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-systemd-units\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117269 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-systemd\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117342 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-systemd-units\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117391 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-log-socket\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117506 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-log-socket\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117523 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-systemd\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117546 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-etc-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117581 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-etc-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117659 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-kubelet\") pod \"ovnkube-node-fgk79\" 
(UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117693 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117750 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-kubelet\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117743 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovn-node-metrics-cert\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117902 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.117966 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-env-overrides\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.118308 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovnkube-config\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.119411 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovnkube-script-lib\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.121099 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.121174 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-node-log\") pod \"ovnkube-node-fgk79\" (UID: 
\"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.121256 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlqsj\" (UniqueName: \"kubernetes.io/projected/55309602-3b5c-4506-8cad-0c1609e2b1cb-kube-api-access-qlqsj\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.121273 4787 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55309602-3b5c-4506-8cad-0c1609e2b1cb-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.121288 4787 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55309602-3b5c-4506-8cad-0c1609e2b1cb-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.121343 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-node-log\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.121396 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/446f0428-1c28-4b1b-b3ce-366e383f7c61-run-openvswitch\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.121793 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/446f0428-1c28-4b1b-b3ce-366e383f7c61-ovn-node-metrics-cert\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.137041 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5jj4\" (UniqueName: \"kubernetes.io/projected/446f0428-1c28-4b1b-b3ce-366e383f7c61-kube-api-access-s5jj4\") pod \"ovnkube-node-fgk79\" (UID: \"446f0428-1c28-4b1b-b3ce-366e383f7c61\") " pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.174844 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovnkube-controller/3.log" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.180280 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovn-acl-logging/0.log" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.182884 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-pq2mb_55309602-3b5c-4506-8cad-0c1609e2b1cb/ovn-controller/0.log" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184355 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18" exitCode=0 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184413 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" 
containerID="b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d" exitCode=0 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184430 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1" exitCode=0 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184447 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7" exitCode=0 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184494 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7" exitCode=0 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184510 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f" exitCode=0 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184513 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184550 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184605 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184627 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184648 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184675 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184698 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184720 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184740 4787 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184754 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184765 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184776 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184789 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184803 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184815 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184826 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184841 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184858 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184871 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184882 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184893 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184904 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184915 4787 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184926 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184937 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184948 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184960 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184997 4787 scope.go:117] "RemoveContainer" containerID="05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.184524 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05" exitCode=143 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185318 4787 generic.go:334] "Generic (PLEG): container finished" podID="55309602-3b5c-4506-8cad-0c1609e2b1cb" containerID="7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7" exitCode=143 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185394 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185420 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185563 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185581 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185593 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185605 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185655 4787 pod_container_deletor.go:114] "Failed to issue the 
request to remove container" containerID={"Type":"cri-o","ID":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185669 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185682 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185693 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185705 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185765 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-pq2mb" event={"ID":"55309602-3b5c-4506-8cad-0c1609e2b1cb","Type":"ContainerDied","Data":"ba1d20726317a0f2bc7348ce80e895451b6bfaaa56fd69a9a9939c6428d63ca0"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185785 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185844 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185857 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185870 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185881 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185929 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185943 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185959 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.185972 4787 pod_container_deletor.go:114] "Failed to issue the 
request to remove container" containerID={"Type":"cri-o","ID":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.186017 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.190598 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/2.log" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.194091 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/1.log" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.194167 4787 generic.go:334] "Generic (PLEG): container finished" podID="d2526766-68ea-4959-a656-b0c68c754890" containerID="84b604b8776c920b884a601246c8598c99b2b3b2060c5a7f6c4aae009d172c7e" exitCode=2 Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.194235 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j6wn4" event={"ID":"d2526766-68ea-4959-a656-b0c68c754890","Type":"ContainerDied","Data":"84b604b8776c920b884a601246c8598c99b2b3b2060c5a7f6c4aae009d172c7e"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.194286 4787 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7"} Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.195273 4787 scope.go:117] "RemoveContainer" containerID="84b604b8776c920b884a601246c8598c99b2b3b2060c5a7f6c4aae009d172c7e" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.241780 4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.250358 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.259631 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-pq2mb"] Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.265678 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-pq2mb"] Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.298759 4787 scope.go:117] "RemoveContainer" containerID="b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.328537 4787 scope.go:117] "RemoveContainer" containerID="dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.351630 4787 scope.go:117] "RemoveContainer" containerID="3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.368335 4787 scope.go:117] "RemoveContainer" containerID="75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.394510 4787 scope.go:117] "RemoveContainer" containerID="fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.419070 4787 scope.go:117] "RemoveContainer" containerID="d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.438754 4787 scope.go:117] "RemoveContainer" containerID="7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.530082 4787 scope.go:117] "RemoveContainer" containerID="cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.544045 4787 scope.go:117] "RemoveContainer" containerID="05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18" Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.544789 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": container with ID starting with 05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18 not found: ID does not exist" containerID="05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.544869 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} err="failed to get container status \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": rpc error: code = NotFound desc = could not find container \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": container with ID starting with 05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18 not found: ID does not exist" Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.544903 4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf" Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.548611 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": container with ID starting with 
11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf not found: ID does not exist" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.548654    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} err="failed to get container status \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": rpc error: code = NotFound desc = could not find container \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": container with ID starting with 11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.548683    4787 scope.go:117] "RemoveContainer" containerID="b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"
Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.549280    4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": container with ID starting with b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d not found: ID does not exist" containerID="b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.549334    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} err="failed to get container status \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": rpc error: code = NotFound desc = could not find container \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": container with ID starting with b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.549375    4787 scope.go:117] "RemoveContainer" containerID="dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"
Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.549826    4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": container with ID starting with dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1 not found: ID does not exist" containerID="dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.549895    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} err="failed to get container status \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": rpc error: code = NotFound desc = could not find container \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": container with ID starting with dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.549938    4787 scope.go:117] "RemoveContainer" containerID="3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"
Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.550268    4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": container with ID starting with 3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7 not found: ID does not exist" containerID="3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.550320    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} err="failed to get container status \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": rpc error: code = NotFound desc = could not find container \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": container with ID starting with 3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.550354    4787 scope.go:117] "RemoveContainer" containerID="75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"
Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.551411    4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": container with ID starting with 75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7 not found: ID does not exist" containerID="75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.551444    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} err="failed to get container status \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": rpc error: code = NotFound desc = could not find container \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": container with ID starting with 75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.551480    4787 scope.go:117] "RemoveContainer" containerID="fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"
Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.551729    4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": container with ID starting with fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f not found: ID does not exist" containerID="fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.551758    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} err="failed to get container status \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": rpc error: code = NotFound desc = could not find container \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": container with ID starting with fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.551783    4787 scope.go:117] "RemoveContainer" containerID="d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"
Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.552040    4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": container with ID starting with d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05 not found: ID does not exist" containerID="d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.552067    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} err="failed to get container status \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": rpc error: code = NotFound desc = could not find container \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": container with ID starting with d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.552087    4787 scope.go:117] "RemoveContainer" containerID="7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"
Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.552347    4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": container with ID starting with 7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7 not found: ID does not exist" containerID="7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.552370    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} err="failed to get container status \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": rpc error: code = NotFound desc = could not find container \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": container with ID starting with 7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.552385    4787 scope.go:117] "RemoveContainer" containerID="cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"
Jan 29 13:29:26 crc kubenswrapper[4787]: E0129 13:29:26.552631    4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": container with ID starting with cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0 not found: ID does not exist" containerID="cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.552659    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} err="failed to get container status \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": rpc error: code = NotFound desc = could not find container \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": container with ID starting with cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.552675    4787 scope.go:117] "RemoveContainer" containerID="05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.552936    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} err="failed to get container status \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": rpc error: code = NotFound desc = could not find container \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": container with ID starting with 05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.552958    4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.553220    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} err="failed to get container status \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": rpc error: code = NotFound desc = could not find container \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": container with ID starting with 11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.553257    4787 scope.go:117] "RemoveContainer" containerID="b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.553498    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} err="failed to get container status \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": rpc error: code = NotFound desc = could not find container \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": container with ID starting with b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.553520    4787 scope.go:117] "RemoveContainer" containerID="dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.553785    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} err="failed to get container status \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": rpc error: code = NotFound desc = could not find container \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": container with ID starting with dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.553840    4787 scope.go:117] "RemoveContainer" containerID="3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.554292    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} err="failed to get container status \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": rpc error: code = NotFound desc = could not find container \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": container with ID starting with 3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.554321    4787 scope.go:117] "RemoveContainer" containerID="75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.554661    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} err="failed to get container status \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": rpc error: code = NotFound desc = could not find container \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": container with ID starting with 75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.554699    4787 scope.go:117] "RemoveContainer" containerID="fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.555128    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} err="failed to get container status \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": rpc error: code = NotFound desc = could not find container \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": container with ID starting with fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.555154    4787 scope.go:117] "RemoveContainer" containerID="d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.555673    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} err="failed to get container status \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": rpc error: code = NotFound desc = could not find container \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": container with ID starting with d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.555699    4787 scope.go:117] "RemoveContainer" containerID="7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.555980    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} err="failed to get container status \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": rpc error: code = NotFound desc = could not find container \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": container with ID starting with 7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.556021    4787 scope.go:117] "RemoveContainer" containerID="cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.556404    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} err="failed to get container status \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": rpc error: code = NotFound desc = could not find container \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": container with ID starting with cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.556430    4787 scope.go:117] "RemoveContainer" containerID="05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.556777    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} err="failed to get container status \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": rpc error: code = NotFound desc = could not find container \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": container with ID starting with 05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.556805    4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.557107    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} err="failed to get container status \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": rpc error: code = NotFound desc = could not find container \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": container with ID starting with 11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.557126    4787 scope.go:117] "RemoveContainer" containerID="b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.557423    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} err="failed to get container status \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": rpc error: code = NotFound desc = could not find container \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": container with ID starting with b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.557442    4787 scope.go:117] "RemoveContainer" containerID="dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.557949    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} err="failed to get container status \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": rpc error: code = NotFound desc = could not find container \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": container with ID starting with dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.557986    4787 scope.go:117] "RemoveContainer" containerID="3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.558289    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} err="failed to get container status \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": rpc error: code = NotFound desc = could not find container \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": container with ID starting with 3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.558317    4787 scope.go:117] "RemoveContainer" containerID="75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.558751    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} err="failed to get container status \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": rpc error: code = NotFound desc = could not find container \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": container with ID starting with 75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.558776    4787 scope.go:117] "RemoveContainer" containerID="fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.559230    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} err="failed to get container status \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": rpc error: code = NotFound desc = could not find container \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": container with ID starting with fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.559252    4787 scope.go:117] "RemoveContainer" containerID="d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.559631    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} err="failed to get container status \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": rpc error: code = NotFound desc = could not find container \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": container with ID starting with d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.559653    4787 scope.go:117] "RemoveContainer" containerID="7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.560018    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} err="failed to get container status \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": rpc error: code = NotFound desc = could not find container \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": container with ID starting with 7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.560042    4787 scope.go:117] "RemoveContainer" containerID="cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.560339    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} err="failed to get container status \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": rpc error: code = NotFound desc = could not find container \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": container with ID starting with cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.560363    4787 scope.go:117] "RemoveContainer" containerID="05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.560628    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} err="failed to get container status \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": rpc error: code = NotFound desc = could not find container \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": container with ID starting with 05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.560653    4787 scope.go:117] "RemoveContainer" containerID="11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.560932    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf"} err="failed to get container status \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": rpc error: code = NotFound desc = could not find container \"11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf\": container with ID starting with 11ae440185ec53a42887c0bebddd135a6dd165a5bc5656880e1a12e0b96909cf not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.560958    4787 scope.go:117] "RemoveContainer" containerID="b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.561172    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d"} err="failed to get container status \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": rpc error: code = NotFound desc = could not find container \"b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d\": container with ID starting with b75c8155e0765ae2f7b580a89883f78b04ee414d6e049d81fdea9b8378bdc05d not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.561197    4787 scope.go:117] "RemoveContainer" containerID="dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.561431    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1"} err="failed to get container status \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": rpc error: code = NotFound desc = could not find container \"dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1\": container with ID starting with dc9cf473aa876fa3b02313fe6b67a8a00776eefbe81433c829b8fba12fa344d1 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.561469    4787 scope.go:117] "RemoveContainer" containerID="3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.561701    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7"} err="failed to get container status \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": rpc error: code = NotFound desc = could not find container \"3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7\": container with ID starting with 3c51e1e89b29eac53aecd69870a50856bb4eece6aadbeff2ab9aab37823458c7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.561723    4787 scope.go:117] "RemoveContainer" containerID="75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.561942    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7"} err="failed to get container status \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": rpc error: code = NotFound desc = could not find container \"75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7\": container with ID starting with 75b98d0bb001f99ea32708ff673d055858a4ea6757c30a4dc8e0c5bc9998bad7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.561965    4787 scope.go:117] "RemoveContainer" containerID="fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.562209    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f"} err="failed to get container status \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": rpc error: code = NotFound desc = could not find container \"fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f\": container with ID starting with fe0599dcd6a0d9ced8ec2461222b4a018036ff16d261add578948516b81c930f not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.562230    4787 scope.go:117] "RemoveContainer" containerID="d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.562482    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05"} err="failed to get container status \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": rpc error: code = NotFound desc = could not find container \"d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05\": container with ID starting with d13ad42593fe7fbf352272e0d3f11cb8e76e714a8441933a9b3c9cfe4be00c05 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.562499    4787 scope.go:117] "RemoveContainer" containerID="7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.562721    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7"} err="failed to get container status \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": rpc error: code = NotFound desc = could not find container \"7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7\": container with ID starting with 7b236e901f12c19e787f7411328351f76eb5300d12f592f6ce237aaef0ae6dc7 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.562741    4787 scope.go:117] "RemoveContainer" containerID="cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.562998    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0"} err="failed to get container status \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": rpc error: code = NotFound desc = could not find container \"cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0\": container with ID starting with cb4dec64047563a141d9559595b5b2bbf79dcd27e00f97fa96b48d29e8a4abb0 not found: ID does not exist"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.563027    4787 scope.go:117] "RemoveContainer" containerID="05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"
Jan 29 13:29:26 crc kubenswrapper[4787]: I0129 13:29:26.563280    4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18"} err="failed to get container status \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": rpc error: code = NotFound desc = could not find container \"05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18\": container with ID starting with 05c6a9061c96ec94c00853e635a52a9c38d737ede0aa33152f3a447dd780da18 not found: ID does not exist"
Jan 29 13:29:27 crc kubenswrapper[4787]: I0129 13:29:27.204151    4787 generic.go:334] "Generic (PLEG): container finished" podID="446f0428-1c28-4b1b-b3ce-366e383f7c61" containerID="a33ee5a78223aadcb003cca29df2b0aba5b4c901118d2b1aef333bd1ee5b352a" exitCode=0
Jan 29 13:29:27 crc kubenswrapper[4787]: I0129 13:29:27.204269    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerDied","Data":"a33ee5a78223aadcb003cca29df2b0aba5b4c901118d2b1aef333bd1ee5b352a"}
Jan 29 13:29:27 crc kubenswrapper[4787]: I0129 13:29:27.204360    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"b1c9a338b8f1ff3ee242fb50f39da0457868cd46ea92a7a26158dd9537159100"}
Jan 29 13:29:27 crc kubenswrapper[4787]: I0129 13:29:27.210825    4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/2.log"
Jan 29 13:29:27 crc kubenswrapper[4787]: I0129 13:29:27.211834    4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/1.log"
Jan 29 13:29:27 crc kubenswrapper[4787]: I0129 13:29:27.211956    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j6wn4" event={"ID":"d2526766-68ea-4959-a656-b0c68c754890","Type":"ContainerStarted","Data":"942f34567db9345783d11edfcb5585f2b7ae28e0d21cbc1deee618c6b8efc2f4"}
Jan 29 13:29:27 crc kubenswrapper[4787]: I0129 13:29:27.995307    4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55309602-3b5c-4506-8cad-0c1609e2b1cb" path="/var/lib/kubelet/pods/55309602-3b5c-4506-8cad-0c1609e2b1cb/volumes"
Jan 29 13:29:28 crc kubenswrapper[4787]: I0129 13:29:28.229618    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"d020836c062afbee6ecbd2af6bfc41f5c88c5ab997f3d24a4a28d3acb05d1df3"}
Jan 29 13:29:28 crc kubenswrapper[4787]: I0129 13:29:28.229673    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"d1e518fc774e3972f159d2fb3c83eaec31e220f2d486245d4dc381a7f8d20f2a"}
Jan 29 13:29:28 crc kubenswrapper[4787]: I0129 13:29:28.229684    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"36ade6fb0b81db06aa2363a910a27f87237454d3b16a4eb31c0ea8b9d68be654"}
Jan 29 13:29:28 crc kubenswrapper[4787]: I0129 13:29:28.229695    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"e8c522a2d960eba487b55288214108a6383579fec7815948c64f342d91cd87cb"}
Jan 29 13:29:28 crc kubenswrapper[4787]: I0129 13:29:28.229703    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"ef6309628685888ee31b9e83bd6233a34c242a114f7caf80cfad25306ae665e7"}
Jan 29 13:29:28 crc kubenswrapper[4787]: I0129 13:29:28.229714    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"6b1f691dc596318d21f56c357f304768e0ae7e1311c8297a7ec4b26ebb06d04f"}
Jan 29 13:29:31 crc kubenswrapper[4787]: I0129 13:29:31.267649    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"aa89ee9e2361b2ce704df54dff7cbaedb304ff66b7a5acfc571bc5e5921b56a6"}
Jan 29 13:29:33 crc kubenswrapper[4787]: I0129 13:29:33.283684    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" event={"ID":"446f0428-1c28-4b1b-b3ce-366e383f7c61","Type":"ContainerStarted","Data":"3ae3600a9a98247ee95629b5bffcb29c5c39e51529c20ea299e833c0ca05ef54"}
Jan 29 13:29:33 crc kubenswrapper[4787]: I0129 13:29:33.284505    4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79"
Jan 29 13:29:33 crc kubenswrapper[4787]: I0129 13:29:33.284528    4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79"
Jan 29 13:29:33 crc kubenswrapper[4787]: I0129 13:29:33.312434    4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79"
Jan 29 13:29:33 crc kubenswrapper[4787]: I0129 13:29:33.320739    4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79" podStartSLOduration=8.320710006 podStartE2EDuration="8.320710006s" podCreationTimestamp="2026-01-29 13:29:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:29:33.316430979 +0000 UTC m=+812.077691255" watchObservedRunningTime="2026-01-29 13:29:33.320710006 +0000 UTC m=+812.081970322"
Jan 29 13:29:34 crc kubenswrapper[4787]: I0129 13:29:34.292055    4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79"
Jan 29 13:29:34 crc kubenswrapper[4787]: I0129 13:29:34.333184    4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.187549    4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-f898k"]
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.189970    4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.199101    4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.199641    4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.200094    4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.200615    4787 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-7ds4c"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.206194    4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-f898k"]
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.251160    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3d647baa-f247-4407-851b-a05fa4fed02c-crc-storage\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.251804    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x722b\" (UniqueName: \"kubernetes.io/projected/3d647baa-f247-4407-851b-a05fa4fed02c-kube-api-access-x722b\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.252007    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3d647baa-f247-4407-851b-a05fa4fed02c-node-mnt\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.353571    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3d647baa-f247-4407-851b-a05fa4fed02c-node-mnt\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.353744    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3d647baa-f247-4407-851b-a05fa4fed02c-crc-storage\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.353857    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x722b\" (UniqueName: \"kubernetes.io/projected/3d647baa-f247-4407-851b-a05fa4fed02c-kube-api-access-x722b\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.353953    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3d647baa-f247-4407-851b-a05fa4fed02c-node-mnt\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.357928    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3d647baa-f247-4407-851b-a05fa4fed02c-crc-storage\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.384024    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x722b\" (UniqueName: \"kubernetes.io/projected/3d647baa-f247-4407-851b-a05fa4fed02c-kube-api-access-x722b\") pod \"crc-storage-crc-f898k\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") " pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:39 crc kubenswrapper[4787]: I0129 13:29:39.560918    4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:40 crc kubenswrapper[4787]: I0129 13:29:40.007702    4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-f898k"]
Jan 29 13:29:40 crc kubenswrapper[4787]: W0129 13:29:40.019256    4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d647baa_f247_4407_851b_a05fa4fed02c.slice/crio-9bfeda47f41367c11bf527bb1e48ecc3b6eb2bcc71a6cd318dff056bafa8fb61 WatchSource:0}: Error finding container 9bfeda47f41367c11bf527bb1e48ecc3b6eb2bcc71a6cd318dff056bafa8fb61: Status 404 returned error can't find the container with id 9bfeda47f41367c11bf527bb1e48ecc3b6eb2bcc71a6cd318dff056bafa8fb61
Jan 29 13:29:40 crc kubenswrapper[4787]: I0129 13:29:40.023510    4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 13:29:40 crc kubenswrapper[4787]: I0129 13:29:40.346894    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-f898k" event={"ID":"3d647baa-f247-4407-851b-a05fa4fed02c","Type":"ContainerStarted","Data":"9bfeda47f41367c11bf527bb1e48ecc3b6eb2bcc71a6cd318dff056bafa8fb61"}
Jan 29 13:29:42 crc kubenswrapper[4787]: I0129 13:29:42.363394    4787 generic.go:334] "Generic (PLEG): container finished" podID="3d647baa-f247-4407-851b-a05fa4fed02c" containerID="20ae87328136876b33fc81e034355830e128f15f75d30bd85ab421ce0e0d0df3" exitCode=0
Jan 29 13:29:42 crc kubenswrapper[4787]: I0129 13:29:42.363576    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-f898k" event={"ID":"3d647baa-f247-4407-851b-a05fa4fed02c","Type":"ContainerDied","Data":"20ae87328136876b33fc81e034355830e128f15f75d30bd85ab421ce0e0d0df3"}
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.712729    4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.852883    4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x722b\" (UniqueName: \"kubernetes.io/projected/3d647baa-f247-4407-851b-a05fa4fed02c-kube-api-access-x722b\") pod \"3d647baa-f247-4407-851b-a05fa4fed02c\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") "
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.852981    4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3d647baa-f247-4407-851b-a05fa4fed02c-node-mnt\") pod \"3d647baa-f247-4407-851b-a05fa4fed02c\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") "
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.853089    4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3d647baa-f247-4407-851b-a05fa4fed02c-crc-storage\") pod \"3d647baa-f247-4407-851b-a05fa4fed02c\" (UID: \"3d647baa-f247-4407-851b-a05fa4fed02c\") "
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.853222    4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d647baa-f247-4407-851b-a05fa4fed02c-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "3d647baa-f247-4407-851b-a05fa4fed02c" (UID: "3d647baa-f247-4407-851b-a05fa4fed02c"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.853559    4787 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3d647baa-f247-4407-851b-a05fa4fed02c-node-mnt\") on node \"crc\" DevicePath \"\""
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.861436    4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d647baa-f247-4407-851b-a05fa4fed02c-kube-api-access-x722b" (OuterVolumeSpecName: "kube-api-access-x722b") pod "3d647baa-f247-4407-851b-a05fa4fed02c" (UID: "3d647baa-f247-4407-851b-a05fa4fed02c"). InnerVolumeSpecName "kube-api-access-x722b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.878961    4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d647baa-f247-4407-851b-a05fa4fed02c-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "3d647baa-f247-4407-851b-a05fa4fed02c" (UID: "3d647baa-f247-4407-851b-a05fa4fed02c"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.954643    4787 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3d647baa-f247-4407-851b-a05fa4fed02c-crc-storage\") on node \"crc\" DevicePath \"\""
Jan 29 13:29:43 crc kubenswrapper[4787]: I0129 13:29:43.954687    4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x722b\" (UniqueName: \"kubernetes.io/projected/3d647baa-f247-4407-851b-a05fa4fed02c-kube-api-access-x722b\") on node \"crc\" DevicePath \"\""
Jan 29 13:29:44 crc kubenswrapper[4787]: I0129 13:29:44.380584    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-f898k" event={"ID":"3d647baa-f247-4407-851b-a05fa4fed02c","Type":"ContainerDied","Data":"9bfeda47f41367c11bf527bb1e48ecc3b6eb2bcc71a6cd318dff056bafa8fb61"}
Jan 29 13:29:44 crc kubenswrapper[4787]: I0129 13:29:44.381020    4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bfeda47f41367c11bf527bb1e48ecc3b6eb2bcc71a6cd318dff056bafa8fb61"
Jan 29 13:29:44 crc kubenswrapper[4787]: I0129 13:29:44.380658    4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-f898k"
Jan 29 13:29:51 crc kubenswrapper[4787]: I0129 13:29:51.898570    4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"]
Jan 29 13:29:51 crc kubenswrapper[4787]: E0129 13:29:51.899497    4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d647baa-f247-4407-851b-a05fa4fed02c" containerName="storage"
Jan 29 13:29:51 crc kubenswrapper[4787]: I0129 13:29:51.899519    4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d647baa-f247-4407-851b-a05fa4fed02c" containerName="storage"
Jan 29 13:29:51 crc kubenswrapper[4787]: I0129 13:29:51.899697    4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d647baa-f247-4407-851b-a05fa4fed02c" containerName="storage"
Jan 29 13:29:51 crc kubenswrapper[4787]: I0129 13:29:51.901286    4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:51 crc kubenswrapper[4787]: I0129 13:29:51.905023    4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 29 13:29:51 crc kubenswrapper[4787]: I0129 13:29:51.912066    4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"]
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.003849    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.003950    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5csn2\" (UniqueName: \"kubernetes.io/projected/07d08c2a-7015-4fca-9056-da4ee86a0d95-kube-api-access-5csn2\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.004079    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.106867    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.106978    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5csn2\" (UniqueName: \"kubernetes.io/projected/07d08c2a-7015-4fca-9056-da4ee86a0d95-kube-api-access-5csn2\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.107187    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.108603    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.109734    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.139647    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5csn2\" (UniqueName: \"kubernetes.io/projected/07d08c2a-7015-4fca-9056-da4ee86a0d95-kube-api-access-5csn2\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.223956    4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:52 crc kubenswrapper[4787]: I0129 13:29:52.500123    4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"]
Jan 29 13:29:53 crc kubenswrapper[4787]: I0129 13:29:53.456651    4787 generic.go:334] "Generic (PLEG): container finished" podID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerID="8b5bd06611095ddb59a7a772fff381154e50b979fac1cf40d5a21ae90dc508f3" exitCode=0
Jan 29 13:29:53 crc kubenswrapper[4787]: I0129 13:29:53.456760    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4" event={"ID":"07d08c2a-7015-4fca-9056-da4ee86a0d95","Type":"ContainerDied","Data":"8b5bd06611095ddb59a7a772fff381154e50b979fac1cf40d5a21ae90dc508f3"}
Jan 29 13:29:53 crc kubenswrapper[4787]: I0129 13:29:53.457222    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4" event={"ID":"07d08c2a-7015-4fca-9056-da4ee86a0d95","Type":"ContainerStarted","Data":"6124219c07320f06fe9bd892e59b7115ab155b800354dd9407c42ee34e225f4d"}
Jan 29 13:29:53 crc kubenswrapper[4787]: I0129 13:29:53.926930    4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tqc9f"]
Jan 29 13:29:53 crc kubenswrapper[4787]: I0129 13:29:53.930422    4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:53 crc kubenswrapper[4787]: I0129 13:29:53.944322    4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tqc9f"]
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.036069    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p42hz\" (UniqueName: \"kubernetes.io/projected/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-kube-api-access-p42hz\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.036540    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-catalog-content\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.036738    4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-utilities\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.140749    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-catalog-content\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.141298    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-catalog-content\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.141311    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-utilities\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.141683    4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p42hz\" (UniqueName: \"kubernetes.io/projected/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-kube-api-access-p42hz\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.141919    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-utilities\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.169029    4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p42hz\" (UniqueName: \"kubernetes.io/projected/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-kube-api-access-p42hz\") pod \"redhat-operators-tqc9f\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.266492    4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tqc9f"
Jan 29 13:29:54 crc kubenswrapper[4787]: I0129 13:29:54.709042    4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tqc9f"]
Jan 29 13:29:55 crc kubenswrapper[4787]: I0129 13:29:55.473338    4787 generic.go:334] "Generic (PLEG): container finished" podID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerID="8740119dd9a0a8cea67bcf62e4b147e5da654e4a2392caeddac0646f724e6c59" exitCode=0
Jan 29 13:29:55 crc kubenswrapper[4787]: I0129 13:29:55.473481    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4" event={"ID":"07d08c2a-7015-4fca-9056-da4ee86a0d95","Type":"ContainerDied","Data":"8740119dd9a0a8cea67bcf62e4b147e5da654e4a2392caeddac0646f724e6c59"}
Jan 29 13:29:55 crc kubenswrapper[4787]: I0129 13:29:55.476020    4787 generic.go:334] "Generic (PLEG): container finished" podID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerID="2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c" exitCode=0
Jan 29 13:29:55 crc kubenswrapper[4787]: I0129 13:29:55.476066    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqc9f" event={"ID":"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95","Type":"ContainerDied","Data":"2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c"}
Jan 29 13:29:55 crc kubenswrapper[4787]: I0129 13:29:55.476122    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqc9f" event={"ID":"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95","Type":"ContainerStarted","Data":"bf8bf5c5cd96db0996f022c5b0466db100fba15be37fc2b20f62277b60f66e0f"}
Jan 29 13:29:56 crc kubenswrapper[4787]: I0129 13:29:56.283843    4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fgk79"
Jan 29 13:29:56 crc kubenswrapper[4787]: I0129 13:29:56.485308    4787 generic.go:334] "Generic (PLEG): container finished" podID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerID="7621e1e8f2882c945b520e9493c47f5f88340eff97be18091fb571d697432060" exitCode=0
Jan 29 13:29:56 crc kubenswrapper[4787]: I0129 13:29:56.485364    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4" event={"ID":"07d08c2a-7015-4fca-9056-da4ee86a0d95","Type":"ContainerDied","Data":"7621e1e8f2882c945b520e9493c47f5f88340eff97be18091fb571d697432060"}
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.767997    4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4"
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.897118    4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-util\") pod \"07d08c2a-7015-4fca-9056-da4ee86a0d95\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") "
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.897251    4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5csn2\" (UniqueName: \"kubernetes.io/projected/07d08c2a-7015-4fca-9056-da4ee86a0d95-kube-api-access-5csn2\") pod \"07d08c2a-7015-4fca-9056-da4ee86a0d95\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") "
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.897343    4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-bundle\") pod \"07d08c2a-7015-4fca-9056-da4ee86a0d95\" (UID: \"07d08c2a-7015-4fca-9056-da4ee86a0d95\") "
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.898360    4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-bundle" (OuterVolumeSpecName: "bundle") pod "07d08c2a-7015-4fca-9056-da4ee86a0d95" (UID: "07d08c2a-7015-4fca-9056-da4ee86a0d95"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.917616    4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07d08c2a-7015-4fca-9056-da4ee86a0d95-kube-api-access-5csn2" (OuterVolumeSpecName: "kube-api-access-5csn2") pod "07d08c2a-7015-4fca-9056-da4ee86a0d95" (UID: "07d08c2a-7015-4fca-9056-da4ee86a0d95"). InnerVolumeSpecName "kube-api-access-5csn2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.933689    4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-util" (OuterVolumeSpecName: "util") pod "07d08c2a-7015-4fca-9056-da4ee86a0d95" (UID: "07d08c2a-7015-4fca-9056-da4ee86a0d95"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.999662    4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5csn2\" (UniqueName: \"kubernetes.io/projected/07d08c2a-7015-4fca-9056-da4ee86a0d95-kube-api-access-5csn2\") on node \"crc\" DevicePath \"\""
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.999702    4787 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:29:57 crc kubenswrapper[4787]: I0129 13:29:57.999718    4787 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/07d08c2a-7015-4fca-9056-da4ee86a0d95-util\") on node \"crc\" DevicePath \"\""
Jan 29 13:29:58 crc kubenswrapper[4787]: I0129 13:29:58.505192    4787 generic.go:334] "Generic (PLEG): container finished" podID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerID="fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369" exitCode=0
Jan 29 13:29:58 crc kubenswrapper[4787]: I0129 13:29:58.505249    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqc9f" event={"ID":"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95","Type":"ContainerDied","Data":"fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369"}
Jan 29 13:29:58 crc kubenswrapper[4787]: I0129 13:29:58.511926    4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4" event={"ID":"07d08c2a-7015-4fca-9056-da4ee86a0d95","Type":"ContainerDied","Data":"6124219c07320f06fe9bd892e59b7115ab155b800354dd9407c42ee34e225f4d"}
Jan 29 13:29:58 crc kubenswrapper[4787]: I0129 13:29:58.511999    4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6124219c07320f06fe9bd892e59b7115ab155b800354dd9407c42ee34e225f4d"
Jan 29 13:29:58 crc kubenswrapper[4787]: I0129 13:29:58.512110    4787 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4" Jan 29 13:29:59 crc kubenswrapper[4787]: I0129 13:29:59.526392 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqc9f" event={"ID":"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95","Type":"ContainerStarted","Data":"d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399"} Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.170995 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tqc9f" podStartSLOduration=3.742125031 podStartE2EDuration="7.170962168s" podCreationTimestamp="2026-01-29 13:29:53 +0000 UTC" firstStartedPulling="2026-01-29 13:29:55.477318661 +0000 UTC m=+834.238578937" lastFinishedPulling="2026-01-29 13:29:58.906155778 +0000 UTC m=+837.667416074" observedRunningTime="2026-01-29 13:29:59.552781648 +0000 UTC m=+838.314041974" watchObservedRunningTime="2026-01-29 13:30:00.170962168 +0000 UTC m=+838.932222464" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.171755 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k"] Jan 29 13:30:00 crc kubenswrapper[4787]: E0129 13:30:00.172055 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerName="util" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.172080 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerName="util" Jan 29 13:30:00 crc kubenswrapper[4787]: E0129 13:30:00.172101 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerName="extract" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.172112 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerName="extract" Jan 29 13:30:00 crc kubenswrapper[4787]: E0129 13:30:00.172126 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerName="pull" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.172134 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerName="pull" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.172290 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="07d08c2a-7015-4fca-9056-da4ee86a0d95" containerName="extract" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.172841 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.176496 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.177550 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.184046 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k"] Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.337732 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2jq2\" (UniqueName: \"kubernetes.io/projected/24c891d4-ad25-431d-a360-f23ac5d82a73-kube-api-access-t2jq2\") pod \"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.337837 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/24c891d4-ad25-431d-a360-f23ac5d82a73-config-volume\") pod \"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.337858 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/24c891d4-ad25-431d-a360-f23ac5d82a73-secret-volume\") pod \"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.439397 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/24c891d4-ad25-431d-a360-f23ac5d82a73-config-volume\") pod \"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.439526 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/24c891d4-ad25-431d-a360-f23ac5d82a73-secret-volume\") pod \"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.439640 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2jq2\" (UniqueName: \"kubernetes.io/projected/24c891d4-ad25-431d-a360-f23ac5d82a73-kube-api-access-t2jq2\") pod \"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.442137 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/24c891d4-ad25-431d-a360-f23ac5d82a73-config-volume\") pod 
\"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.446843 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/24c891d4-ad25-431d-a360-f23ac5d82a73-secret-volume\") pod \"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.467181 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2jq2\" (UniqueName: \"kubernetes.io/projected/24c891d4-ad25-431d-a360-f23ac5d82a73-kube-api-access-t2jq2\") pod \"collect-profiles-29494890-wjm2k\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.510605 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:00 crc kubenswrapper[4787]: I0129 13:30:00.785013 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k"] Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.540526 4787 generic.go:334] "Generic (PLEG): container finished" podID="24c891d4-ad25-431d-a360-f23ac5d82a73" containerID="d8f366b8ede352cb860c23469298afd1bbd9047e2a69b72f2cb1f1d78b583a14" exitCode=0 Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.540644 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" event={"ID":"24c891d4-ad25-431d-a360-f23ac5d82a73","Type":"ContainerDied","Data":"d8f366b8ede352cb860c23469298afd1bbd9047e2a69b72f2cb1f1d78b583a14"} Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.541011 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" event={"ID":"24c891d4-ad25-431d-a360-f23ac5d82a73","Type":"ContainerStarted","Data":"92dafa5e68845ff553542395177831c758b5d0f8d6ab5d009d46408a4c906328"} Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.633694 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-mwzj8"] Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.634704 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-mwzj8" Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.636643 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.637039 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.637484 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-khw4s" Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.650554 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-mwzj8"] Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.674199 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs4rq\" (UniqueName: \"kubernetes.io/projected/d1ad75bd-bcbc-49f4-a0ca-3312f4cee489-kube-api-access-xs4rq\") pod \"nmstate-operator-646758c888-mwzj8\" (UID: \"d1ad75bd-bcbc-49f4-a0ca-3312f4cee489\") " pod="openshift-nmstate/nmstate-operator-646758c888-mwzj8" Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.775689 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs4rq\" (UniqueName: \"kubernetes.io/projected/d1ad75bd-bcbc-49f4-a0ca-3312f4cee489-kube-api-access-xs4rq\") pod \"nmstate-operator-646758c888-mwzj8\" (UID: \"d1ad75bd-bcbc-49f4-a0ca-3312f4cee489\") " pod="openshift-nmstate/nmstate-operator-646758c888-mwzj8" Jan 29 13:30:01 crc kubenswrapper[4787]: I0129 13:30:01.803571 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs4rq\" (UniqueName: \"kubernetes.io/projected/d1ad75bd-bcbc-49f4-a0ca-3312f4cee489-kube-api-access-xs4rq\") pod \"nmstate-operator-646758c888-mwzj8\" (UID: \"d1ad75bd-bcbc-49f4-a0ca-3312f4cee489\") " pod="openshift-nmstate/nmstate-operator-646758c888-mwzj8" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.003537 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-khw4s" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.011087 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-mwzj8" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.320340 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-mwzj8"] Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.547760 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-mwzj8" event={"ID":"d1ad75bd-bcbc-49f4-a0ca-3312f4cee489","Type":"ContainerStarted","Data":"daf091c5720f1f856fb392a3308f4520e38f165f8fa0401247eb873c65e4ae10"} Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.866381 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.892036 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/24c891d4-ad25-431d-a360-f23ac5d82a73-config-volume\") pod \"24c891d4-ad25-431d-a360-f23ac5d82a73\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.892217 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2jq2\" (UniqueName: \"kubernetes.io/projected/24c891d4-ad25-431d-a360-f23ac5d82a73-kube-api-access-t2jq2\") pod \"24c891d4-ad25-431d-a360-f23ac5d82a73\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.892402 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/24c891d4-ad25-431d-a360-f23ac5d82a73-secret-volume\") pod \"24c891d4-ad25-431d-a360-f23ac5d82a73\" (UID: \"24c891d4-ad25-431d-a360-f23ac5d82a73\") " Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.892690 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24c891d4-ad25-431d-a360-f23ac5d82a73-config-volume" (OuterVolumeSpecName: "config-volume") pod "24c891d4-ad25-431d-a360-f23ac5d82a73" (UID: "24c891d4-ad25-431d-a360-f23ac5d82a73"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.894088 4787 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/24c891d4-ad25-431d-a360-f23ac5d82a73-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.900715 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24c891d4-ad25-431d-a360-f23ac5d82a73-kube-api-access-t2jq2" (OuterVolumeSpecName: "kube-api-access-t2jq2") pod "24c891d4-ad25-431d-a360-f23ac5d82a73" (UID: "24c891d4-ad25-431d-a360-f23ac5d82a73"). InnerVolumeSpecName "kube-api-access-t2jq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.900772 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24c891d4-ad25-431d-a360-f23ac5d82a73-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "24c891d4-ad25-431d-a360-f23ac5d82a73" (UID: "24c891d4-ad25-431d-a360-f23ac5d82a73"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.995385 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2jq2\" (UniqueName: \"kubernetes.io/projected/24c891d4-ad25-431d-a360-f23ac5d82a73-kube-api-access-t2jq2\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:02 crc kubenswrapper[4787]: I0129 13:30:02.995421 4787 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/24c891d4-ad25-431d-a360-f23ac5d82a73-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:03 crc kubenswrapper[4787]: I0129 13:30:03.557201 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" event={"ID":"24c891d4-ad25-431d-a360-f23ac5d82a73","Type":"ContainerDied","Data":"92dafa5e68845ff553542395177831c758b5d0f8d6ab5d009d46408a4c906328"} Jan 29 13:30:03 crc kubenswrapper[4787]: I0129 13:30:03.557266 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k" Jan 29 13:30:03 crc kubenswrapper[4787]: I0129 13:30:03.557267 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92dafa5e68845ff553542395177831c758b5d0f8d6ab5d009d46408a4c906328" Jan 29 13:30:04 crc kubenswrapper[4787]: I0129 13:30:04.267600 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tqc9f" Jan 29 13:30:04 crc kubenswrapper[4787]: I0129 13:30:04.268037 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tqc9f" Jan 29 13:30:05 crc kubenswrapper[4787]: I0129 13:30:05.316383 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tqc9f" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="registry-server" probeResult="failure" output=< Jan 29 13:30:05 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s Jan 29 13:30:05 crc kubenswrapper[4787]: > Jan 29 13:30:06 crc kubenswrapper[4787]: I0129 13:30:06.577278 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-mwzj8" event={"ID":"d1ad75bd-bcbc-49f4-a0ca-3312f4cee489","Type":"ContainerStarted","Data":"792a70e413dd0594d57ce79faf5c20b5c1cb88719c23d3c9550170cc5091a6bd"} Jan 29 13:30:06 crc kubenswrapper[4787]: I0129 13:30:06.598479 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-mwzj8" podStartSLOduration=2.082897759 podStartE2EDuration="5.598432821s" podCreationTimestamp="2026-01-29 13:30:01 +0000 UTC" firstStartedPulling="2026-01-29 13:30:02.331673084 +0000 UTC m=+841.092933360" lastFinishedPulling="2026-01-29 13:30:05.847208146 +0000 UTC m=+844.608468422" observedRunningTime="2026-01-29 13:30:06.595005737 +0000 UTC m=+845.356266003" watchObservedRunningTime="2026-01-29 13:30:06.598432821 +0000 UTC m=+845.359693107" Jan 29 13:30:14 crc kubenswrapper[4787]: I0129 13:30:14.327874 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tqc9f" Jan 29 13:30:14 crc kubenswrapper[4787]: I0129 13:30:14.399377 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tqc9f" Jan 29 13:30:14 crc kubenswrapper[4787]: I0129 
13:30:14.570872 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tqc9f"] Jan 29 13:30:15 crc kubenswrapper[4787]: I0129 13:30:15.646971 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tqc9f" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="registry-server" containerID="cri-o://d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399" gracePeriod=2 Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.061118 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tqc9f" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.088706 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p42hz\" (UniqueName: \"kubernetes.io/projected/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-kube-api-access-p42hz\") pod \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.088790 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-utilities\") pod \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.088860 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-catalog-content\") pod \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\" (UID: \"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95\") " Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.091436 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-utilities" (OuterVolumeSpecName: "utilities") pod "bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" (UID: "bfc1bcab-ef14-4388-bc0c-9812f0fbfe95"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.096840 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-kube-api-access-p42hz" (OuterVolumeSpecName: "kube-api-access-p42hz") pod "bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" (UID: "bfc1bcab-ef14-4388-bc0c-9812f0fbfe95"). InnerVolumeSpecName "kube-api-access-p42hz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.190712 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p42hz\" (UniqueName: \"kubernetes.io/projected/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-kube-api-access-p42hz\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.190760 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.219262 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" (UID: "bfc1bcab-ef14-4388-bc0c-9812f0fbfe95"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.292096 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.658647 4787 generic.go:334] "Generic (PLEG): container finished" podID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerID="d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399" exitCode=0 Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.658740 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqc9f" event={"ID":"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95","Type":"ContainerDied","Data":"d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399"} Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.658831 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tqc9f" event={"ID":"bfc1bcab-ef14-4388-bc0c-9812f0fbfe95","Type":"ContainerDied","Data":"bf8bf5c5cd96db0996f022c5b0466db100fba15be37fc2b20f62277b60f66e0f"} Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.658841 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tqc9f" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.658874 4787 scope.go:117] "RemoveContainer" containerID="d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.682470 4787 scope.go:117] "RemoveContainer" containerID="fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.721856 4787 scope.go:117] "RemoveContainer" containerID="2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.728513 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tqc9f"] Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.737837 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tqc9f"] Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.751424 4787 scope.go:117] "RemoveContainer" containerID="d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399" Jan 29 13:30:16 crc kubenswrapper[4787]: E0129 13:30:16.752221 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399\": container with ID starting with d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399 not found: ID does not exist" containerID="d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.752286 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399"} err="failed to get container status \"d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399\": rpc error: code = NotFound desc = could not find container \"d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399\": container with ID starting with d880e0eeb3b9615289a449518c2e266c9a1c028ac07e958dec427894e473d399 not found: ID 
does not exist" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.752326 4787 scope.go:117] "RemoveContainer" containerID="fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369" Jan 29 13:30:16 crc kubenswrapper[4787]: E0129 13:30:16.752760 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369\": container with ID starting with fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369 not found: ID does not exist" containerID="fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.752801 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369"} err="failed to get container status \"fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369\": rpc error: code = NotFound desc = could not find container \"fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369\": container with ID starting with fd4ee22f3970863e3548ec51ab535b3dfecf2b121632841d7ec0fbddf9cad369 not found: ID does not exist" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.752826 4787 scope.go:117] "RemoveContainer" containerID="2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c" Jan 29 13:30:16 crc kubenswrapper[4787]: E0129 13:30:16.753519 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c\": container with ID starting with 2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c not found: ID does not exist" containerID="2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c" Jan 29 13:30:16 crc kubenswrapper[4787]: I0129 13:30:16.753549 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c"} err="failed to get container status \"2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c\": rpc error: code = NotFound desc = could not find container \"2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c\": container with ID starting with 2fb06bfb1841d29e17f6bd2d2e24080417799f3301828657f424442d47a5b94c not found: ID does not exist" Jan 29 13:30:17 crc kubenswrapper[4787]: I0129 13:30:17.999116 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" path="/var/lib/kubelet/pods/bfc1bcab-ef14-4388-bc0c-9812f0fbfe95/volumes" Jan 29 13:30:18 crc kubenswrapper[4787]: I0129 13:30:18.646534 4787 scope.go:117] "RemoveContainer" containerID="1e3a5dc38557101aaeb60f05c95e793d9ac8d5f3ebc9b145eb22d496bf2492a7" Jan 29 13:30:19 crc kubenswrapper[4787]: I0129 13:30:19.690352 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j6wn4_d2526766-68ea-4959-a656-b0c68c754890/kube-multus/2.log" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.314753 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9"] Jan 29 13:30:33 crc kubenswrapper[4787]: E0129 13:30:33.316031 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="registry-server" Jan 29 13:30:33 crc kubenswrapper[4787]: 
I0129 13:30:33.316050 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="registry-server" Jan 29 13:30:33 crc kubenswrapper[4787]: E0129 13:30:33.316063 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24c891d4-ad25-431d-a360-f23ac5d82a73" containerName="collect-profiles" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.316072 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="24c891d4-ad25-431d-a360-f23ac5d82a73" containerName="collect-profiles" Jan 29 13:30:33 crc kubenswrapper[4787]: E0129 13:30:33.316087 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="extract-content" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.316095 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="extract-content" Jan 29 13:30:33 crc kubenswrapper[4787]: E0129 13:30:33.316114 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="extract-utilities" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.316121 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="extract-utilities" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.316237 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="24c891d4-ad25-431d-a360-f23ac5d82a73" containerName="collect-profiles" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.316255 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfc1bcab-ef14-4388-bc0c-9812f0fbfe95" containerName="registry-server" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.316849 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.321995 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.322169 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-qsb6g"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.323404 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.330538 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.331905 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-mnhrs" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.334498 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-55sg5"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.336234 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.352908 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-qsb6g"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.458425 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q7zz\" (UniqueName: \"kubernetes.io/projected/f92a2cd2-6857-4c80-bfea-c78d4803e46c-kube-api-access-8q7zz\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.458491 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/5609e633-dfb8-473b-9165-437046bbf13b-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-r6pn9\" (UID: \"5609e633-dfb8-473b-9165-437046bbf13b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.458517 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-ovs-socket\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.458550 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsw28\" (UniqueName: \"kubernetes.io/projected/55575e97-6bfe-40d8-b4b5-8f5b020ef25f-kube-api-access-gsw28\") pod \"nmstate-metrics-54757c584b-qsb6g\" (UID: \"55575e97-6bfe-40d8-b4b5-8f5b020ef25f\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.458573 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-nmstate-lock\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.458607 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-dbus-socket\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.458626 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h9dp\" (UniqueName: \"kubernetes.io/projected/5609e633-dfb8-473b-9165-437046bbf13b-kube-api-access-2h9dp\") pod \"nmstate-webhook-8474b5b9d8-r6pn9\" (UID: \"5609e633-dfb8-473b-9165-437046bbf13b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.493067 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.493902 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.496119 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-xfmn7" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.496442 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.501183 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.506171 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560257 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-dbus-socket\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560315 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h9dp\" (UniqueName: \"kubernetes.io/projected/5609e633-dfb8-473b-9165-437046bbf13b-kube-api-access-2h9dp\") pod \"nmstate-webhook-8474b5b9d8-r6pn9\" (UID: \"5609e633-dfb8-473b-9165-437046bbf13b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560375 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q7zz\" (UniqueName: \"kubernetes.io/projected/f92a2cd2-6857-4c80-bfea-c78d4803e46c-kube-api-access-8q7zz\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560401 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/5609e633-dfb8-473b-9165-437046bbf13b-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-r6pn9\" (UID: \"5609e633-dfb8-473b-9165-437046bbf13b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560423 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-ovs-socket\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560479 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsw28\" (UniqueName: \"kubernetes.io/projected/55575e97-6bfe-40d8-b4b5-8f5b020ef25f-kube-api-access-gsw28\") pod \"nmstate-metrics-54757c584b-qsb6g\" (UID: \"55575e97-6bfe-40d8-b4b5-8f5b020ef25f\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560506 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-nmstate-lock\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 
29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560577 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-nmstate-lock\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.560648 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-ovs-socket\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.562916 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f92a2cd2-6857-4c80-bfea-c78d4803e46c-dbus-socket\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.580484 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/5609e633-dfb8-473b-9165-437046bbf13b-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-r6pn9\" (UID: \"5609e633-dfb8-473b-9165-437046bbf13b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.584138 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsw28\" (UniqueName: \"kubernetes.io/projected/55575e97-6bfe-40d8-b4b5-8f5b020ef25f-kube-api-access-gsw28\") pod \"nmstate-metrics-54757c584b-qsb6g\" (UID: \"55575e97-6bfe-40d8-b4b5-8f5b020ef25f\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.584678 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q7zz\" (UniqueName: \"kubernetes.io/projected/f92a2cd2-6857-4c80-bfea-c78d4803e46c-kube-api-access-8q7zz\") pod \"nmstate-handler-55sg5\" (UID: \"f92a2cd2-6857-4c80-bfea-c78d4803e46c\") " pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.585124 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h9dp\" (UniqueName: \"kubernetes.io/projected/5609e633-dfb8-473b-9165-437046bbf13b-kube-api-access-2h9dp\") pod \"nmstate-webhook-8474b5b9d8-r6pn9\" (UID: \"5609e633-dfb8-473b-9165-437046bbf13b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.645768 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.658963 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.661416 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9480738d-8817-480c-8968-7107f514a967-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.661447 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9480738d-8817-480c-8968-7107f514a967-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.661523 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqnzt\" (UniqueName: \"kubernetes.io/projected/9480738d-8817-480c-8968-7107f514a967-kube-api-access-sqnzt\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.667338 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.689802 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-79fc796696-2gfbk"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.690661 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.700225 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-79fc796696-2gfbk"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.762907 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqnzt\" (UniqueName: \"kubernetes.io/projected/9480738d-8817-480c-8968-7107f514a967-kube-api-access-sqnzt\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.763308 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9480738d-8817-480c-8968-7107f514a967-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.763339 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9480738d-8817-480c-8968-7107f514a967-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.766600 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/9480738d-8817-480c-8968-7107f514a967-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.770627 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/9480738d-8817-480c-8968-7107f514a967-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.782312 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqnzt\" (UniqueName: \"kubernetes.io/projected/9480738d-8817-480c-8968-7107f514a967-kube-api-access-sqnzt\") pod \"nmstate-console-plugin-7754f76f8b-wht4l\" (UID: \"9480738d-8817-480c-8968-7107f514a967\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.800867 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-55sg5" event={"ID":"f92a2cd2-6857-4c80-bfea-c78d4803e46c","Type":"ContainerStarted","Data":"4bb87606414de589c6f54a2de5d9896cdce8c50f51836ffa041fdf1504969aa3"} Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.813849 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.864589 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-oauth-config\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.864643 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-serving-cert\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.864674 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zl5z\" (UniqueName: \"kubernetes.io/projected/be55c3cb-ec09-4118-8264-cbdf08d46c27-kube-api-access-2zl5z\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.864708 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-config\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.864740 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-oauth-serving-cert\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.864781 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-trusted-ca-bundle\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.864820 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-service-ca\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.918130 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9"] Jan 29 13:30:33 crc kubenswrapper[4787]: W0129 13:30:33.937542 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5609e633_dfb8_473b_9165_437046bbf13b.slice/crio-a7a4dacbe25fdd4732eb39c8f44d0c6a0ee5f3c128f2b9175b0e53df441a86a0 WatchSource:0}: Error finding container 
a7a4dacbe25fdd4732eb39c8f44d0c6a0ee5f3c128f2b9175b0e53df441a86a0: Status 404 returned error can't find the container with id a7a4dacbe25fdd4732eb39c8f44d0c6a0ee5f3c128f2b9175b0e53df441a86a0 Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.967598 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-trusted-ca-bundle\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.967698 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-service-ca\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.967777 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-oauth-config\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.967803 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zl5z\" (UniqueName: \"kubernetes.io/projected/be55c3cb-ec09-4118-8264-cbdf08d46c27-kube-api-access-2zl5z\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.967827 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-serving-cert\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.967863 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-config\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.970622 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-oauth-serving-cert\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.970241 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-config\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.970529 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-trusted-ca-bundle\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.969583 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-service-ca\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.971562 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/be55c3cb-ec09-4118-8264-cbdf08d46c27-oauth-serving-cert\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.973664 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-qsb6g"] Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.976319 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-oauth-config\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.977565 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/be55c3cb-ec09-4118-8264-cbdf08d46c27-console-serving-cert\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:33 crc kubenswrapper[4787]: W0129 13:30:33.979754 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55575e97_6bfe_40d8_b4b5_8f5b020ef25f.slice/crio-78e302342338bf98094bb169e0c3322c5d85856d8ff823749758e909f42fb540 WatchSource:0}: Error finding container 78e302342338bf98094bb169e0c3322c5d85856d8ff823749758e909f42fb540: Status 404 returned error can't find the container with id 78e302342338bf98094bb169e0c3322c5d85856d8ff823749758e909f42fb540 Jan 29 13:30:33 crc kubenswrapper[4787]: I0129 13:30:33.986039 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zl5z\" (UniqueName: \"kubernetes.io/projected/be55c3cb-ec09-4118-8264-cbdf08d46c27-kube-api-access-2zl5z\") pod \"console-79fc796696-2gfbk\" (UID: \"be55c3cb-ec09-4118-8264-cbdf08d46c27\") " pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.024149 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l"] Jan 29 13:30:34 crc kubenswrapper[4787]: W0129 13:30:34.030532 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9480738d_8817_480c_8968_7107f514a967.slice/crio-cdbdff89d1e867e3acb685bffc0d07890684879308c1f0c4c1b1828cf42125f5 WatchSource:0}: Error finding container cdbdff89d1e867e3acb685bffc0d07890684879308c1f0c4c1b1828cf42125f5: Status 404 returned error can't find the container with id 
cdbdff89d1e867e3acb685bffc0d07890684879308c1f0c4c1b1828cf42125f5 Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.041584 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.304507 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-79fc796696-2gfbk"] Jan 29 13:30:34 crc kubenswrapper[4787]: W0129 13:30:34.307551 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe55c3cb_ec09_4118_8264_cbdf08d46c27.slice/crio-4555aca2398ff15c238e9c4d8b2a0344b2be3e13515b629d6fff0b9c61f33ec0 WatchSource:0}: Error finding container 4555aca2398ff15c238e9c4d8b2a0344b2be3e13515b629d6fff0b9c61f33ec0: Status 404 returned error can't find the container with id 4555aca2398ff15c238e9c4d8b2a0344b2be3e13515b629d6fff0b9c61f33ec0 Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.809255 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" event={"ID":"55575e97-6bfe-40d8-b4b5-8f5b020ef25f","Type":"ContainerStarted","Data":"78e302342338bf98094bb169e0c3322c5d85856d8ff823749758e909f42fb540"} Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.810632 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" event={"ID":"5609e633-dfb8-473b-9165-437046bbf13b","Type":"ContainerStarted","Data":"a7a4dacbe25fdd4732eb39c8f44d0c6a0ee5f3c128f2b9175b0e53df441a86a0"} Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.812626 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-79fc796696-2gfbk" event={"ID":"be55c3cb-ec09-4118-8264-cbdf08d46c27","Type":"ContainerStarted","Data":"c4361860e4e5a821292e4f7b3d17b5f74450a3af38ef2f9ab58e215af29f1f02"} Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.812661 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-79fc796696-2gfbk" event={"ID":"be55c3cb-ec09-4118-8264-cbdf08d46c27","Type":"ContainerStarted","Data":"4555aca2398ff15c238e9c4d8b2a0344b2be3e13515b629d6fff0b9c61f33ec0"} Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.814786 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" event={"ID":"9480738d-8817-480c-8968-7107f514a967","Type":"ContainerStarted","Data":"cdbdff89d1e867e3acb685bffc0d07890684879308c1f0c4c1b1828cf42125f5"} Jan 29 13:30:34 crc kubenswrapper[4787]: I0129 13:30:34.842546 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-79fc796696-2gfbk" podStartSLOduration=1.842527392 podStartE2EDuration="1.842527392s" podCreationTimestamp="2026-01-29 13:30:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:30:34.84106776 +0000 UTC m=+873.602328066" watchObservedRunningTime="2026-01-29 13:30:34.842527392 +0000 UTC m=+873.603787668" Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.837748 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-55sg5" event={"ID":"f92a2cd2-6857-4c80-bfea-c78d4803e46c","Type":"ContainerStarted","Data":"6fa137d29ef17351e76d3e6db10204764b0e2c2388f09c340e29b216101f4339"} Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.838776 4787 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.840567 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" event={"ID":"9480738d-8817-480c-8968-7107f514a967","Type":"ContainerStarted","Data":"f82ef420b6894ea3e322f1f65ac4a940ac7d6c1fe4edbc449cc610b39aba8fc2"} Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.845823 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" event={"ID":"55575e97-6bfe-40d8-b4b5-8f5b020ef25f","Type":"ContainerStarted","Data":"df53b0218ffef6020de17c4a41d578399cac0cc48f1baddb15d3cf74519f7863"} Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.847216 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" event={"ID":"5609e633-dfb8-473b-9165-437046bbf13b","Type":"ContainerStarted","Data":"31d415f28e47785a0fc353c4bb49c642e9359695acdba31b895adb98ade2a7b5"} Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.847354 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.856855 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-55sg5" podStartSLOduration=1.839151675 podStartE2EDuration="4.856832787s" podCreationTimestamp="2026-01-29 13:30:33 +0000 UTC" firstStartedPulling="2026-01-29 13:30:33.763534152 +0000 UTC m=+872.524794428" lastFinishedPulling="2026-01-29 13:30:36.781215254 +0000 UTC m=+875.542475540" observedRunningTime="2026-01-29 13:30:37.854867649 +0000 UTC m=+876.616127925" watchObservedRunningTime="2026-01-29 13:30:37.856832787 +0000 UTC m=+876.618093063" Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.877210 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wht4l" podStartSLOduration=2.151523634 podStartE2EDuration="4.87716568s" podCreationTimestamp="2026-01-29 13:30:33 +0000 UTC" firstStartedPulling="2026-01-29 13:30:34.033189044 +0000 UTC m=+872.794449320" lastFinishedPulling="2026-01-29 13:30:36.75883108 +0000 UTC m=+875.520091366" observedRunningTime="2026-01-29 13:30:37.873489143 +0000 UTC m=+876.634749419" watchObservedRunningTime="2026-01-29 13:30:37.87716568 +0000 UTC m=+876.638425956" Jan 29 13:30:37 crc kubenswrapper[4787]: I0129 13:30:37.900718 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" podStartSLOduration=2.068674255 podStartE2EDuration="4.900682016s" podCreationTimestamp="2026-01-29 13:30:33 +0000 UTC" firstStartedPulling="2026-01-29 13:30:33.948132231 +0000 UTC m=+872.709392497" lastFinishedPulling="2026-01-29 13:30:36.780139982 +0000 UTC m=+875.541400258" observedRunningTime="2026-01-29 13:30:37.89397798 +0000 UTC m=+876.655238256" watchObservedRunningTime="2026-01-29 13:30:37.900682016 +0000 UTC m=+876.661942302" Jan 29 13:30:39 crc kubenswrapper[4787]: I0129 13:30:39.889294 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" event={"ID":"55575e97-6bfe-40d8-b4b5-8f5b020ef25f","Type":"ContainerStarted","Data":"d2c1e0789b785d1d3cb8aba5608849f40ae3e308c136443c5b40b0ad99ac0bd8"} Jan 29 13:30:39 crc kubenswrapper[4787]: I0129 
13:30:39.925114 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-qsb6g" podStartSLOduration=1.2837622 podStartE2EDuration="6.925067869s" podCreationTimestamp="2026-01-29 13:30:33 +0000 UTC" firstStartedPulling="2026-01-29 13:30:33.987361816 +0000 UTC m=+872.748622092" lastFinishedPulling="2026-01-29 13:30:39.628667485 +0000 UTC m=+878.389927761" observedRunningTime="2026-01-29 13:30:39.920968119 +0000 UTC m=+878.682228445" watchObservedRunningTime="2026-01-29 13:30:39.925067869 +0000 UTC m=+878.686328205" Jan 29 13:30:43 crc kubenswrapper[4787]: I0129 13:30:43.708073 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-55sg5" Jan 29 13:30:44 crc kubenswrapper[4787]: I0129 13:30:44.042395 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:44 crc kubenswrapper[4787]: I0129 13:30:44.042497 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:44 crc kubenswrapper[4787]: I0129 13:30:44.048710 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:44 crc kubenswrapper[4787]: I0129 13:30:44.937711 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-79fc796696-2gfbk" Jan 29 13:30:45 crc kubenswrapper[4787]: I0129 13:30:45.017993 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5v5vz"] Jan 29 13:30:53 crc kubenswrapper[4787]: I0129 13:30:53.655337 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-r6pn9" Jan 29 13:30:58 crc kubenswrapper[4787]: I0129 13:30:58.394183 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:30:58 crc kubenswrapper[4787]: I0129 13:30:58.395116 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.005227 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n"] Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.007393 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.012138 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.017414 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n"] Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.035708 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.035987 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgp5d\" (UniqueName: \"kubernetes.io/projected/ac56de9c-d784-426d-830c-9adfa79702a0-kube-api-access-bgp5d\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.036063 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.136815 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgp5d\" (UniqueName: \"kubernetes.io/projected/ac56de9c-d784-426d-830c-9adfa79702a0-kube-api-access-bgp5d\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.137186 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.137313 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.137805 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.137886 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.161871 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgp5d\" (UniqueName: \"kubernetes.io/projected/ac56de9c-d784-426d-830c-9adfa79702a0-kube-api-access-bgp5d\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.323886 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:09 crc kubenswrapper[4787]: I0129 13:31:09.815627 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n"] Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.073134 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-5v5vz" podUID="2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" containerName="console" containerID="cri-o://1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a" gracePeriod=15 Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.137398 4787 generic.go:334] "Generic (PLEG): container finished" podID="ac56de9c-d784-426d-830c-9adfa79702a0" containerID="e9fbfde864da297a9b8c180977852b0aa609708937b9ce28301183bfbedf87eb" exitCode=0 Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.137473 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" event={"ID":"ac56de9c-d784-426d-830c-9adfa79702a0","Type":"ContainerDied","Data":"e9fbfde864da297a9b8c180977852b0aa609708937b9ce28301183bfbedf87eb"} Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.137510 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" event={"ID":"ac56de9c-d784-426d-830c-9adfa79702a0","Type":"ContainerStarted","Data":"74895d3cea16fb6d3d334e565231354a8979d2e6b9d575c416900699c9d61bfc"} Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.266803 4787 patch_prober.go:28] interesting pod/console-f9d7485db-5v5vz container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.267374 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f9d7485db-5v5vz" podUID="2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" containerName="console" 
probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.508014 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5v5vz_2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af/console/0.log" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.508094 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.590138 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-serving-cert\") pod \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.590241 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhr62\" (UniqueName: \"kubernetes.io/projected/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-kube-api-access-lhr62\") pod \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.590303 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-config\") pod \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.590400 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-oauth-serving-cert\") pod \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.590446 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-service-ca\") pod \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.590581 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-oauth-config\") pod \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.590618 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-trusted-ca-bundle\") pod \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\" (UID: \"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af\") " Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.592255 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" (UID: "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.592276 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-service-ca" (OuterVolumeSpecName: "service-ca") pod "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" (UID: "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.592428 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" (UID: "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.592986 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-config" (OuterVolumeSpecName: "console-config") pod "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" (UID: "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.600545 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" (UID: "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.603760 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" (UID: "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.604226 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-kube-api-access-lhr62" (OuterVolumeSpecName: "kube-api-access-lhr62") pod "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" (UID: "2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af"). InnerVolumeSpecName "kube-api-access-lhr62". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.693240 4787 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.693329 4787 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-service-ca\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.693355 4787 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.693378 4787 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.693402 4787 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.693425 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhr62\" (UniqueName: \"kubernetes.io/projected/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-kube-api-access-lhr62\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:10 crc kubenswrapper[4787]: I0129 13:31:10.693543 4787 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af-console-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.148620 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5v5vz_2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af/console/0.log" Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.148691 4787 generic.go:334] "Generic (PLEG): container finished" podID="2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" containerID="1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a" exitCode=2 Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.148733 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5v5vz" event={"ID":"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af","Type":"ContainerDied","Data":"1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a"} Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.148774 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5v5vz" event={"ID":"2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af","Type":"ContainerDied","Data":"c438728e97f9fd4e3c2344d24f41a82440cffcd2fd54b0695954f7d96acf119d"} Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.148798 4787 scope.go:117] "RemoveContainer" containerID="1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a" Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.148828 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5v5vz" Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.178614 4787 scope.go:117] "RemoveContainer" containerID="1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a" Jan 29 13:31:11 crc kubenswrapper[4787]: E0129 13:31:11.179870 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a\": container with ID starting with 1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a not found: ID does not exist" containerID="1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a" Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.179950 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a"} err="failed to get container status \"1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a\": rpc error: code = NotFound desc = could not find container \"1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a\": container with ID starting with 1f2f9aa7ace4b51e80e9e3d4ce32887848f474080199d22cb82b6c98a16ee70a not found: ID does not exist" Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.204240 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5v5vz"] Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.208619 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5v5vz"] Jan 29 13:31:11 crc kubenswrapper[4787]: I0129 13:31:11.997169 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" path="/var/lib/kubelet/pods/2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af/volumes" Jan 29 13:31:12 crc kubenswrapper[4787]: I0129 13:31:12.159009 4787 generic.go:334] "Generic (PLEG): container finished" podID="ac56de9c-d784-426d-830c-9adfa79702a0" containerID="92f13cd5aaa96d7ad92bc1fd0c6b6c8858351e220cc81e6e7b631ecab8951660" exitCode=0 Jan 29 13:31:12 crc kubenswrapper[4787]: I0129 13:31:12.159060 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" event={"ID":"ac56de9c-d784-426d-830c-9adfa79702a0","Type":"ContainerDied","Data":"92f13cd5aaa96d7ad92bc1fd0c6b6c8858351e220cc81e6e7b631ecab8951660"} Jan 29 13:31:13 crc kubenswrapper[4787]: I0129 13:31:13.170724 4787 generic.go:334] "Generic (PLEG): container finished" podID="ac56de9c-d784-426d-830c-9adfa79702a0" containerID="7752284256efb4917565b846c0abd9ff0c125057d144044f67e0f0d1214ce0d0" exitCode=0 Jan 29 13:31:13 crc kubenswrapper[4787]: I0129 13:31:13.170796 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" event={"ID":"ac56de9c-d784-426d-830c-9adfa79702a0","Type":"ContainerDied","Data":"7752284256efb4917565b846c0abd9ff0c125057d144044f67e0f0d1214ce0d0"} Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.508997 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.558949 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-bundle\") pod \"ac56de9c-d784-426d-830c-9adfa79702a0\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.559018 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-util\") pod \"ac56de9c-d784-426d-830c-9adfa79702a0\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.559155 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgp5d\" (UniqueName: \"kubernetes.io/projected/ac56de9c-d784-426d-830c-9adfa79702a0-kube-api-access-bgp5d\") pod \"ac56de9c-d784-426d-830c-9adfa79702a0\" (UID: \"ac56de9c-d784-426d-830c-9adfa79702a0\") " Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.561074 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-bundle" (OuterVolumeSpecName: "bundle") pod "ac56de9c-d784-426d-830c-9adfa79702a0" (UID: "ac56de9c-d784-426d-830c-9adfa79702a0"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.566764 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac56de9c-d784-426d-830c-9adfa79702a0-kube-api-access-bgp5d" (OuterVolumeSpecName: "kube-api-access-bgp5d") pod "ac56de9c-d784-426d-830c-9adfa79702a0" (UID: "ac56de9c-d784-426d-830c-9adfa79702a0"). InnerVolumeSpecName "kube-api-access-bgp5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.591183 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-util" (OuterVolumeSpecName: "util") pod "ac56de9c-d784-426d-830c-9adfa79702a0" (UID: "ac56de9c-d784-426d-830c-9adfa79702a0"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.661288 4787 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.661354 4787 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac56de9c-d784-426d-830c-9adfa79702a0-util\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:14 crc kubenswrapper[4787]: I0129 13:31:14.661376 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgp5d\" (UniqueName: \"kubernetes.io/projected/ac56de9c-d784-426d-830c-9adfa79702a0-kube-api-access-bgp5d\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:15 crc kubenswrapper[4787]: I0129 13:31:15.187741 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" event={"ID":"ac56de9c-d784-426d-830c-9adfa79702a0","Type":"ContainerDied","Data":"74895d3cea16fb6d3d334e565231354a8979d2e6b9d575c416900699c9d61bfc"} Jan 29 13:31:15 crc kubenswrapper[4787]: I0129 13:31:15.187803 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n" Jan 29 13:31:15 crc kubenswrapper[4787]: I0129 13:31:15.187818 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74895d3cea16fb6d3d334e565231354a8979d2e6b9d575c416900699c9d61bfc" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.705631 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x"] Jan 29 13:31:23 crc kubenswrapper[4787]: E0129 13:31:23.706852 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac56de9c-d784-426d-830c-9adfa79702a0" containerName="extract" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.706877 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac56de9c-d784-426d-830c-9adfa79702a0" containerName="extract" Jan 29 13:31:23 crc kubenswrapper[4787]: E0129 13:31:23.706895 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac56de9c-d784-426d-830c-9adfa79702a0" containerName="pull" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.706905 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac56de9c-d784-426d-830c-9adfa79702a0" containerName="pull" Jan 29 13:31:23 crc kubenswrapper[4787]: E0129 13:31:23.706933 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" containerName="console" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.706942 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" containerName="console" Jan 29 13:31:23 crc kubenswrapper[4787]: E0129 13:31:23.706953 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac56de9c-d784-426d-830c-9adfa79702a0" containerName="util" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.706961 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac56de9c-d784-426d-830c-9adfa79702a0" containerName="util" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.707084 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d9e6fd9-f3cb-4a2c-b877-70b681bcb0af" containerName="console" Jan 
29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.707106 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac56de9c-d784-426d-830c-9adfa79702a0" containerName="extract" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.707715 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.709936 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-xsmpj" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.710285 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.710341 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.710495 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.711538 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.716317 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x"] Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.795503 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6758092c-146f-4462-9aea-214f3c018f71-apiservice-cert\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.795592 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6758092c-146f-4462-9aea-214f3c018f71-webhook-cert\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.795633 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6hkk\" (UniqueName: \"kubernetes.io/projected/6758092c-146f-4462-9aea-214f3c018f71-kube-api-access-t6hkk\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.896681 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6hkk\" (UniqueName: \"kubernetes.io/projected/6758092c-146f-4462-9aea-214f3c018f71-kube-api-access-t6hkk\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.896795 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/6758092c-146f-4462-9aea-214f3c018f71-apiservice-cert\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.896921 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6758092c-146f-4462-9aea-214f3c018f71-webhook-cert\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.924930 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6758092c-146f-4462-9aea-214f3c018f71-webhook-cert\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.926858 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6758092c-146f-4462-9aea-214f3c018f71-apiservice-cert\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:23 crc kubenswrapper[4787]: I0129 13:31:23.927028 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6hkk\" (UniqueName: \"kubernetes.io/projected/6758092c-146f-4462-9aea-214f3c018f71-kube-api-access-t6hkk\") pod \"metallb-operator-controller-manager-757f9bfbb9-gzt8x\" (UID: \"6758092c-146f-4462-9aea-214f3c018f71\") " pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.063780 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq"] Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.064781 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.067315 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-gcdxn" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.068092 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.068108 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.074689 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.086532 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq"] Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.205523 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czsf6\" (UniqueName: \"kubernetes.io/projected/43423821-b0d6-4d8a-a8cb-d03102470ff0-kube-api-access-czsf6\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.205618 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/43423821-b0d6-4d8a-a8cb-d03102470ff0-webhook-cert\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.205662 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/43423821-b0d6-4d8a-a8cb-d03102470ff0-apiservice-cert\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.307300 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czsf6\" (UniqueName: \"kubernetes.io/projected/43423821-b0d6-4d8a-a8cb-d03102470ff0-kube-api-access-czsf6\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.307382 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/43423821-b0d6-4d8a-a8cb-d03102470ff0-webhook-cert\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.307413 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/43423821-b0d6-4d8a-a8cb-d03102470ff0-apiservice-cert\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.315279 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/43423821-b0d6-4d8a-a8cb-d03102470ff0-webhook-cert\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.325630 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/43423821-b0d6-4d8a-a8cb-d03102470ff0-apiservice-cert\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.330149 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czsf6\" (UniqueName: \"kubernetes.io/projected/43423821-b0d6-4d8a-a8cb-d03102470ff0-kube-api-access-czsf6\") pod \"metallb-operator-webhook-server-747fb77c56-fsqhq\" (UID: \"43423821-b0d6-4d8a-a8cb-d03102470ff0\") " pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.379608 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x"] Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.380959 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:24 crc kubenswrapper[4787]: I0129 13:31:24.628578 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq"] Jan 29 13:31:24 crc kubenswrapper[4787]: W0129 13:31:24.638573 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43423821_b0d6_4d8a_a8cb_d03102470ff0.slice/crio-102b4ece88e523dca8b5a26f523cf44f95d1a8515f1c58e06e8e05a6a89148d9 WatchSource:0}: Error finding container 102b4ece88e523dca8b5a26f523cf44f95d1a8515f1c58e06e8e05a6a89148d9: Status 404 returned error can't find the container with id 102b4ece88e523dca8b5a26f523cf44f95d1a8515f1c58e06e8e05a6a89148d9 Jan 29 13:31:25 crc kubenswrapper[4787]: I0129 13:31:25.262330 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" event={"ID":"43423821-b0d6-4d8a-a8cb-d03102470ff0","Type":"ContainerStarted","Data":"102b4ece88e523dca8b5a26f523cf44f95d1a8515f1c58e06e8e05a6a89148d9"} Jan 29 13:31:25 crc kubenswrapper[4787]: I0129 13:31:25.263805 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" event={"ID":"6758092c-146f-4462-9aea-214f3c018f71","Type":"ContainerStarted","Data":"038305b8f920e815c77120a668be48998830c928e6b53ec96f891ff933529c5a"} Jan 29 13:31:28 crc kubenswrapper[4787]: I0129 13:31:28.394266 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:31:28 crc kubenswrapper[4787]: I0129 13:31:28.396252 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:31:31 crc kubenswrapper[4787]: I0129 13:31:31.302761 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" 
event={"ID":"43423821-b0d6-4d8a-a8cb-d03102470ff0","Type":"ContainerStarted","Data":"34ed9a5093dc8d5c6118e7730a6d9dc10a093feed508c936b646be60bdb6fd3b"} Jan 29 13:31:31 crc kubenswrapper[4787]: I0129 13:31:31.304241 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:31 crc kubenswrapper[4787]: I0129 13:31:31.304474 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" event={"ID":"6758092c-146f-4462-9aea-214f3c018f71","Type":"ContainerStarted","Data":"b4692e4ababb1110af00f564d7767ed48804454ea4a310e836e88752433ce295"} Jan 29 13:31:31 crc kubenswrapper[4787]: I0129 13:31:31.304614 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:31:31 crc kubenswrapper[4787]: I0129 13:31:31.334126 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" podStartSLOduration=1.976728086 podStartE2EDuration="7.334097761s" podCreationTimestamp="2026-01-29 13:31:24 +0000 UTC" firstStartedPulling="2026-01-29 13:31:24.641330676 +0000 UTC m=+923.402590952" lastFinishedPulling="2026-01-29 13:31:29.998700351 +0000 UTC m=+928.759960627" observedRunningTime="2026-01-29 13:31:31.331274423 +0000 UTC m=+930.092534709" watchObservedRunningTime="2026-01-29 13:31:31.334097761 +0000 UTC m=+930.095358047" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.093643 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" podStartSLOduration=15.500403473 podStartE2EDuration="21.093610691s" podCreationTimestamp="2026-01-29 13:31:23 +0000 UTC" firstStartedPulling="2026-01-29 13:31:24.40465148 +0000 UTC m=+923.165911756" lastFinishedPulling="2026-01-29 13:31:29.997858698 +0000 UTC m=+928.759118974" observedRunningTime="2026-01-29 13:31:31.35938561 +0000 UTC m=+930.120645886" watchObservedRunningTime="2026-01-29 13:31:44.093610691 +0000 UTC m=+942.854870987" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.098529 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2tsqq"] Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.099963 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.127964 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2tsqq"] Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.136125 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkz7d\" (UniqueName: \"kubernetes.io/projected/fe78c247-a00a-4142-861a-bb0c6955d2aa-kube-api-access-kkz7d\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.136302 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-utilities\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.136356 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-catalog-content\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.237617 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-catalog-content\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.238226 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkz7d\" (UniqueName: \"kubernetes.io/projected/fe78c247-a00a-4142-861a-bb0c6955d2aa-kube-api-access-kkz7d\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.238289 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-utilities\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.238289 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-catalog-content\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.238816 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-utilities\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.265042 4787 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-kkz7d\" (UniqueName: \"kubernetes.io/projected/fe78c247-a00a-4142-861a-bb0c6955d2aa-kube-api-access-kkz7d\") pod \"redhat-marketplace-2tsqq\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.392488 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-747fb77c56-fsqhq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.431159 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:44 crc kubenswrapper[4787]: I0129 13:31:44.701326 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2tsqq"] Jan 29 13:31:45 crc kubenswrapper[4787]: I0129 13:31:45.424139 4787 generic.go:334] "Generic (PLEG): container finished" podID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerID="961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b" exitCode=0 Jan 29 13:31:45 crc kubenswrapper[4787]: I0129 13:31:45.424229 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2tsqq" event={"ID":"fe78c247-a00a-4142-861a-bb0c6955d2aa","Type":"ContainerDied","Data":"961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b"} Jan 29 13:31:45 crc kubenswrapper[4787]: I0129 13:31:45.424276 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2tsqq" event={"ID":"fe78c247-a00a-4142-861a-bb0c6955d2aa","Type":"ContainerStarted","Data":"fca6797b7871576c1c2b9b8673d603a0ff3e25106a1452171256af90244aef49"} Jan 29 13:31:46 crc kubenswrapper[4787]: I0129 13:31:46.450279 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2tsqq" event={"ID":"fe78c247-a00a-4142-861a-bb0c6955d2aa","Type":"ContainerStarted","Data":"8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342"} Jan 29 13:31:47 crc kubenswrapper[4787]: I0129 13:31:47.462162 4787 generic.go:334] "Generic (PLEG): container finished" podID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerID="8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342" exitCode=0 Jan 29 13:31:47 crc kubenswrapper[4787]: I0129 13:31:47.462242 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2tsqq" event={"ID":"fe78c247-a00a-4142-861a-bb0c6955d2aa","Type":"ContainerDied","Data":"8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342"} Jan 29 13:31:48 crc kubenswrapper[4787]: I0129 13:31:48.469839 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2tsqq" event={"ID":"fe78c247-a00a-4142-861a-bb0c6955d2aa","Type":"ContainerStarted","Data":"8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf"} Jan 29 13:31:54 crc kubenswrapper[4787]: I0129 13:31:54.431773 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:54 crc kubenswrapper[4787]: I0129 13:31:54.432705 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:54 crc kubenswrapper[4787]: I0129 13:31:54.470396 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 
13:31:54 crc kubenswrapper[4787]: I0129 13:31:54.488270 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2tsqq" podStartSLOduration=8.060238082 podStartE2EDuration="10.488247705s" podCreationTimestamp="2026-01-29 13:31:44 +0000 UTC" firstStartedPulling="2026-01-29 13:31:45.426651596 +0000 UTC m=+944.187911902" lastFinishedPulling="2026-01-29 13:31:47.854661249 +0000 UTC m=+946.615921525" observedRunningTime="2026-01-29 13:31:48.491219967 +0000 UTC m=+947.252480243" watchObservedRunningTime="2026-01-29 13:31:54.488247705 +0000 UTC m=+953.249507991" Jan 29 13:31:54 crc kubenswrapper[4787]: I0129 13:31:54.548871 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:54 crc kubenswrapper[4787]: I0129 13:31:54.723517 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2tsqq"] Jan 29 13:31:56 crc kubenswrapper[4787]: I0129 13:31:56.528625 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2tsqq" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerName="registry-server" containerID="cri-o://8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf" gracePeriod=2 Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.002030 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.065184 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkz7d\" (UniqueName: \"kubernetes.io/projected/fe78c247-a00a-4142-861a-bb0c6955d2aa-kube-api-access-kkz7d\") pod \"fe78c247-a00a-4142-861a-bb0c6955d2aa\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.065255 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-utilities\") pod \"fe78c247-a00a-4142-861a-bb0c6955d2aa\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.065443 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-catalog-content\") pod \"fe78c247-a00a-4142-861a-bb0c6955d2aa\" (UID: \"fe78c247-a00a-4142-861a-bb0c6955d2aa\") " Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.066679 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-utilities" (OuterVolumeSpecName: "utilities") pod "fe78c247-a00a-4142-861a-bb0c6955d2aa" (UID: "fe78c247-a00a-4142-861a-bb0c6955d2aa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.075713 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe78c247-a00a-4142-861a-bb0c6955d2aa-kube-api-access-kkz7d" (OuterVolumeSpecName: "kube-api-access-kkz7d") pod "fe78c247-a00a-4142-861a-bb0c6955d2aa" (UID: "fe78c247-a00a-4142-861a-bb0c6955d2aa"). InnerVolumeSpecName "kube-api-access-kkz7d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.104069 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe78c247-a00a-4142-861a-bb0c6955d2aa" (UID: "fe78c247-a00a-4142-861a-bb0c6955d2aa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.167147 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.167216 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkz7d\" (UniqueName: \"kubernetes.io/projected/fe78c247-a00a-4142-861a-bb0c6955d2aa-kube-api-access-kkz7d\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.167239 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe78c247-a00a-4142-861a-bb0c6955d2aa-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.542727 4787 generic.go:334] "Generic (PLEG): container finished" podID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerID="8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf" exitCode=0 Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.542858 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2tsqq" event={"ID":"fe78c247-a00a-4142-861a-bb0c6955d2aa","Type":"ContainerDied","Data":"8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf"} Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.542889 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2tsqq" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.543441 4787 scope.go:117] "RemoveContainer" containerID="8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.543413 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2tsqq" event={"ID":"fe78c247-a00a-4142-861a-bb0c6955d2aa","Type":"ContainerDied","Data":"fca6797b7871576c1c2b9b8673d603a0ff3e25106a1452171256af90244aef49"} Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.592192 4787 scope.go:117] "RemoveContainer" containerID="8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.618198 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2tsqq"] Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.635074 4787 scope.go:117] "RemoveContainer" containerID="961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.645786 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2tsqq"] Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.665505 4787 scope.go:117] "RemoveContainer" containerID="8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf" Jan 29 13:31:57 crc kubenswrapper[4787]: E0129 13:31:57.666152 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf\": container with ID starting with 8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf not found: ID does not exist" containerID="8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.666250 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf"} err="failed to get container status \"8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf\": rpc error: code = NotFound desc = could not find container \"8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf\": container with ID starting with 8c8899277b8ead16cc24eb0814a04a99acb3df799d6a42570df4e0a1f3a11ebf not found: ID does not exist" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.666296 4787 scope.go:117] "RemoveContainer" containerID="8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342" Jan 29 13:31:57 crc kubenswrapper[4787]: E0129 13:31:57.666705 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342\": container with ID starting with 8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342 not found: ID does not exist" containerID="8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.666747 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342"} err="failed to get container status \"8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342\": rpc error: code = NotFound desc = could not find 
container \"8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342\": container with ID starting with 8eb95acd3df3404932b3270b71a93510588a2bdbb554e5d4f5ac55387ad89342 not found: ID does not exist" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.666776 4787 scope.go:117] "RemoveContainer" containerID="961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b" Jan 29 13:31:57 crc kubenswrapper[4787]: E0129 13:31:57.668012 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b\": container with ID starting with 961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b not found: ID does not exist" containerID="961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.668051 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b"} err="failed to get container status \"961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b\": rpc error: code = NotFound desc = could not find container \"961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b\": container with ID starting with 961bddc1db3bb72188891ca88ca9585fc7136d445f9a9208978ecbc7c8f58c2b not found: ID does not exist" Jan 29 13:31:57 crc kubenswrapper[4787]: I0129 13:31:57.999552 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" path="/var/lib/kubelet/pods/fe78c247-a00a-4142-861a-bb0c6955d2aa/volumes" Jan 29 13:31:58 crc kubenswrapper[4787]: I0129 13:31:58.395283 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:31:58 crc kubenswrapper[4787]: I0129 13:31:58.395390 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:31:58 crc kubenswrapper[4787]: I0129 13:31:58.395530 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:31:58 crc kubenswrapper[4787]: I0129 13:31:58.396445 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2753a5187d40800d90e7784477132e3d3982abbaf428dff98dbd39ac66898a8b"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:31:58 crc kubenswrapper[4787]: I0129 13:31:58.396580 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://2753a5187d40800d90e7784477132e3d3982abbaf428dff98dbd39ac66898a8b" gracePeriod=600 Jan 29 13:31:58 crc kubenswrapper[4787]: I0129 13:31:58.552119 4787 
generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="2753a5187d40800d90e7784477132e3d3982abbaf428dff98dbd39ac66898a8b" exitCode=0 Jan 29 13:31:58 crc kubenswrapper[4787]: I0129 13:31:58.552190 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"2753a5187d40800d90e7784477132e3d3982abbaf428dff98dbd39ac66898a8b"} Jan 29 13:31:58 crc kubenswrapper[4787]: I0129 13:31:58.552237 4787 scope.go:117] "RemoveContainer" containerID="4dd80cb5b01d45821c261b540b1bfc37859561721213e5bfb9d7026de25f0942" Jan 29 13:31:59 crc kubenswrapper[4787]: I0129 13:31:59.565323 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"df9e8740bf151a75c689d168b226a7f5a7159a6e915923c5f7df0f22fffdf98a"} Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.077941 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-757f9bfbb9-gzt8x" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.882724 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-8nc6k"] Jan 29 13:32:04 crc kubenswrapper[4787]: E0129 13:32:04.883531 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerName="extract-utilities" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.883608 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerName="extract-utilities" Jan 29 13:32:04 crc kubenswrapper[4787]: E0129 13:32:04.883661 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerName="registry-server" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.883711 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerName="registry-server" Jan 29 13:32:04 crc kubenswrapper[4787]: E0129 13:32:04.883767 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerName="extract-content" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.883818 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerName="extract-content" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.883997 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe78c247-a00a-4142-861a-bb0c6955d2aa" containerName="registry-server" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.886011 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.889309 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.889315 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.889481 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-g2f9w" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.898083 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9"] Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.899151 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.901737 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.914497 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9"] Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992223 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4c81250-d1f0-4706-8c3f-70e69c976131-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-l82t9\" (UID: \"d4c81250-d1f0-4706-8c3f-70e69c976131\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992329 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-reloader\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992369 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5zlf\" (UniqueName: \"kubernetes.io/projected/499c781e-42ec-4475-ab1e-d5204f4bdac4-kube-api-access-c5zlf\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992399 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-conf\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992495 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992673 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-startup\") pod \"frr-k8s-8nc6k\" (UID: 
\"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992717 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics-certs\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992741 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-sockets\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:04 crc kubenswrapper[4787]: I0129 13:32:04.992773 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g22dq\" (UniqueName: \"kubernetes.io/projected/d4c81250-d1f0-4706-8c3f-70e69c976131-kube-api-access-g22dq\") pod \"frr-k8s-webhook-server-7df86c4f6c-l82t9\" (UID: \"d4c81250-d1f0-4706-8c3f-70e69c976131\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.026532 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-ntsqx"] Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.027746 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.030363 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.031046 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.031423 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.032130 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-7d5v5" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.032370 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-94cfr"] Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.033759 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.037701 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.047129 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-94cfr"] Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.094556 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.094626 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metrics-certs\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.094654 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4c81250-d1f0-4706-8c3f-70e69c976131-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-l82t9\" (UID: \"d4c81250-d1f0-4706-8c3f-70e69c976131\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.094677 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metallb-excludel2\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.094867 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-reloader\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.094920 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5zlf\" (UniqueName: \"kubernetes.io/projected/499c781e-42ec-4475-ab1e-d5204f4bdac4-kube-api-access-c5zlf\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.094961 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kq8z\" (UniqueName: \"kubernetes.io/projected/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-kube-api-access-2kq8z\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.094983 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-conf\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.095051 4787 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pn5rn\" (UniqueName: \"kubernetes.io/projected/b506b359-c014-4d87-b053-ec4e7fc51ba2-kube-api-access-pn5rn\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.095104 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.095131 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-startup\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.095170 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics-certs\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.095202 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-sockets\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.095237 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b506b359-c014-4d87-b053-ec4e7fc51ba2-cert\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.095281 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b506b359-c014-4d87-b053-ec4e7fc51ba2-metrics-certs\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.095309 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g22dq\" (UniqueName: \"kubernetes.io/projected/d4c81250-d1f0-4706-8c3f-70e69c976131-kube-api-access-g22dq\") pod \"frr-k8s-webhook-server-7df86c4f6c-l82t9\" (UID: \"d4c81250-d1f0-4706-8c3f-70e69c976131\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:05 crc kubenswrapper[4787]: E0129 13:32:05.095833 4787 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 29 13:32:05 crc kubenswrapper[4787]: E0129 13:32:05.095893 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics-certs podName:499c781e-42ec-4475-ab1e-d5204f4bdac4 nodeName:}" failed. No retries permitted until 2026-01-29 13:32:05.595871151 +0000 UTC m=+964.357131427 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics-certs") pod "frr-k8s-8nc6k" (UID: "499c781e-42ec-4475-ab1e-d5204f4bdac4") : secret "frr-k8s-certs-secret" not found Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.096105 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-reloader\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.096151 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-sockets\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.096375 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-conf\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.096403 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.096786 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/499c781e-42ec-4475-ab1e-d5204f4bdac4-frr-startup\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.112410 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d4c81250-d1f0-4706-8c3f-70e69c976131-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-l82t9\" (UID: \"d4c81250-d1f0-4706-8c3f-70e69c976131\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.116143 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5zlf\" (UniqueName: \"kubernetes.io/projected/499c781e-42ec-4475-ab1e-d5204f4bdac4-kube-api-access-c5zlf\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.116202 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g22dq\" (UniqueName: \"kubernetes.io/projected/d4c81250-d1f0-4706-8c3f-70e69c976131-kube-api-access-g22dq\") pod \"frr-k8s-webhook-server-7df86c4f6c-l82t9\" (UID: \"d4c81250-d1f0-4706-8c3f-70e69c976131\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.197173 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc 
kubenswrapper[4787]: I0129 13:32:05.197240 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metrics-certs\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.197270 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metallb-excludel2\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.197339 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kq8z\" (UniqueName: \"kubernetes.io/projected/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-kube-api-access-2kq8z\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.197367 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pn5rn\" (UniqueName: \"kubernetes.io/projected/b506b359-c014-4d87-b053-ec4e7fc51ba2-kube-api-access-pn5rn\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.197416 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b506b359-c014-4d87-b053-ec4e7fc51ba2-cert\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.197440 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b506b359-c014-4d87-b053-ec4e7fc51ba2-metrics-certs\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: E0129 13:32:05.197434 4787 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 29 13:32:05 crc kubenswrapper[4787]: E0129 13:32:05.197563 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist podName:ea0fd311-2d72-463c-8f9b-2c9ba1dc8903 nodeName:}" failed. No retries permitted until 2026-01-29 13:32:05.697535143 +0000 UTC m=+964.458795419 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist") pod "speaker-ntsqx" (UID: "ea0fd311-2d72-463c-8f9b-2c9ba1dc8903") : secret "metallb-memberlist" not found Jan 29 13:32:05 crc kubenswrapper[4787]: E0129 13:32:05.198090 4787 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 29 13:32:05 crc kubenswrapper[4787]: E0129 13:32:05.198134 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metrics-certs podName:ea0fd311-2d72-463c-8f9b-2c9ba1dc8903 nodeName:}" failed. 
No retries permitted until 2026-01-29 13:32:05.69812587 +0000 UTC m=+964.459386146 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metrics-certs") pod "speaker-ntsqx" (UID: "ea0fd311-2d72-463c-8f9b-2c9ba1dc8903") : secret "speaker-certs-secret" not found Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.199751 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metallb-excludel2\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.202929 4787 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.207390 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b506b359-c014-4d87-b053-ec4e7fc51ba2-metrics-certs\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.213992 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b506b359-c014-4d87-b053-ec4e7fc51ba2-cert\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.220957 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.231208 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kq8z\" (UniqueName: \"kubernetes.io/projected/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-kube-api-access-2kq8z\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.246251 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pn5rn\" (UniqueName: \"kubernetes.io/projected/b506b359-c014-4d87-b053-ec4e7fc51ba2-kube-api-access-pn5rn\") pod \"controller-6968d8fdc4-94cfr\" (UID: \"b506b359-c014-4d87-b053-ec4e7fc51ba2\") " pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.353792 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:05 crc kubenswrapper[4787]: W0129 13:32:05.533537 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4c81250_d1f0_4706_8c3f_70e69c976131.slice/crio-43a64ae47e6d6dd2ff1bab02c474fcf3528084bbf100bbff27d51320b1600f79 WatchSource:0}: Error finding container 43a64ae47e6d6dd2ff1bab02c474fcf3528084bbf100bbff27d51320b1600f79: Status 404 returned error can't find the container with id 43a64ae47e6d6dd2ff1bab02c474fcf3528084bbf100bbff27d51320b1600f79 Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.535352 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9"] Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.605568 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics-certs\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.609645 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-94cfr"] Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.610258 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" event={"ID":"d4c81250-d1f0-4706-8c3f-70e69c976131","Type":"ContainerStarted","Data":"43a64ae47e6d6dd2ff1bab02c474fcf3528084bbf100bbff27d51320b1600f79"} Jan 29 13:32:05 crc kubenswrapper[4787]: W0129 13:32:05.614405 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb506b359_c014_4d87_b053_ec4e7fc51ba2.slice/crio-e78f6accd4cca7b98e0efe09a25c0bb04cf6263b133e30e5581dfcb09669412b WatchSource:0}: Error finding container e78f6accd4cca7b98e0efe09a25c0bb04cf6263b133e30e5581dfcb09669412b: Status 404 returned error can't find the container with id e78f6accd4cca7b98e0efe09a25c0bb04cf6263b133e30e5581dfcb09669412b Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.615013 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/499c781e-42ec-4475-ab1e-d5204f4bdac4-metrics-certs\") pod \"frr-k8s-8nc6k\" (UID: \"499c781e-42ec-4475-ab1e-d5204f4bdac4\") " pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.707028 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metrics-certs\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.707204 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: E0129 13:32:05.707370 4787 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 29 13:32:05 crc kubenswrapper[4787]: E0129 13:32:05.707504 4787 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist podName:ea0fd311-2d72-463c-8f9b-2c9ba1dc8903 nodeName:}" failed. No retries permitted until 2026-01-29 13:32:06.707480159 +0000 UTC m=+965.468740445 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist") pod "speaker-ntsqx" (UID: "ea0fd311-2d72-463c-8f9b-2c9ba1dc8903") : secret "metallb-memberlist" not found Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.712786 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-metrics-certs\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:05 crc kubenswrapper[4787]: I0129 13:32:05.806014 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.620197 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-94cfr" event={"ID":"b506b359-c014-4d87-b053-ec4e7fc51ba2","Type":"ContainerStarted","Data":"b93d8f8db913a81be494f694916fc6ff001b536a90f9abe568627bdb28130268"} Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.620777 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.620791 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-94cfr" event={"ID":"b506b359-c014-4d87-b053-ec4e7fc51ba2","Type":"ContainerStarted","Data":"f404bc0f5df00d5e24850ea9dbd92e41d9e70735bf5d781632e9580c25f4f4fb"} Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.620801 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-94cfr" event={"ID":"b506b359-c014-4d87-b053-ec4e7fc51ba2","Type":"ContainerStarted","Data":"e78f6accd4cca7b98e0efe09a25c0bb04cf6263b133e30e5581dfcb09669412b"} Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.624488 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerStarted","Data":"9f30f510c716beda01cbcaacafbaf68d774920f8bd1424a2bc6bee9c1635a81e"} Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.646591 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-94cfr" podStartSLOduration=1.646572127 podStartE2EDuration="1.646572127s" podCreationTimestamp="2026-01-29 13:32:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:32:06.641228009 +0000 UTC m=+965.402488305" watchObservedRunningTime="2026-01-29 13:32:06.646572127 +0000 UTC m=+965.407832403" Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.724363 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.733433 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" 
(UniqueName: \"kubernetes.io/secret/ea0fd311-2d72-463c-8f9b-2c9ba1dc8903-memberlist\") pod \"speaker-ntsqx\" (UID: \"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903\") " pod="metallb-system/speaker-ntsqx" Jan 29 13:32:06 crc kubenswrapper[4787]: I0129 13:32:06.846231 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-ntsqx" Jan 29 13:32:06 crc kubenswrapper[4787]: W0129 13:32:06.891429 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea0fd311_2d72_463c_8f9b_2c9ba1dc8903.slice/crio-232b5aebd65761ea90295c3557284c6ea48be1c5af7ddcaaff41f604de489e88 WatchSource:0}: Error finding container 232b5aebd65761ea90295c3557284c6ea48be1c5af7ddcaaff41f604de489e88: Status 404 returned error can't find the container with id 232b5aebd65761ea90295c3557284c6ea48be1c5af7ddcaaff41f604de489e88 Jan 29 13:32:07 crc kubenswrapper[4787]: I0129 13:32:07.635328 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ntsqx" event={"ID":"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903","Type":"ContainerStarted","Data":"11a1606a8538d882fe5309cd4ccb86a520e708722ee8e7e48f351239c6ee270c"} Jan 29 13:32:07 crc kubenswrapper[4787]: I0129 13:32:07.636307 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ntsqx" event={"ID":"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903","Type":"ContainerStarted","Data":"56d5a8a4301feb33b5004281f4905b2cca0df35c230a251879a2aa7e182dc90c"} Jan 29 13:32:07 crc kubenswrapper[4787]: I0129 13:32:07.636323 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ntsqx" event={"ID":"ea0fd311-2d72-463c-8f9b-2c9ba1dc8903","Type":"ContainerStarted","Data":"232b5aebd65761ea90295c3557284c6ea48be1c5af7ddcaaff41f604de489e88"} Jan 29 13:32:07 crc kubenswrapper[4787]: I0129 13:32:07.636619 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-ntsqx" Jan 29 13:32:07 crc kubenswrapper[4787]: I0129 13:32:07.656546 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-ntsqx" podStartSLOduration=2.6565182529999998 podStartE2EDuration="2.656518253s" podCreationTimestamp="2026-01-29 13:32:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:32:07.655037212 +0000 UTC m=+966.416297508" watchObservedRunningTime="2026-01-29 13:32:07.656518253 +0000 UTC m=+966.417778539" Jan 29 13:32:14 crc kubenswrapper[4787]: I0129 13:32:14.705629 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" event={"ID":"d4c81250-d1f0-4706-8c3f-70e69c976131","Type":"ContainerStarted","Data":"d969dbb48cea418ec9e533087297f13bf11337840cbca860d796ff354b5eee06"} Jan 29 13:32:14 crc kubenswrapper[4787]: I0129 13:32:14.707352 4787 generic.go:334] "Generic (PLEG): container finished" podID="499c781e-42ec-4475-ab1e-d5204f4bdac4" containerID="3354ceb156008d9b24ababb24758b8cbe7dd6edaed265e475287938ffe15ef6d" exitCode=0 Jan 29 13:32:14 crc kubenswrapper[4787]: I0129 13:32:14.708020 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:14 crc kubenswrapper[4787]: I0129 13:32:14.708139 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" 
event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerDied","Data":"3354ceb156008d9b24ababb24758b8cbe7dd6edaed265e475287938ffe15ef6d"} Jan 29 13:32:14 crc kubenswrapper[4787]: I0129 13:32:14.724180 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" podStartSLOduration=2.482864518 podStartE2EDuration="10.724141666s" podCreationTimestamp="2026-01-29 13:32:04 +0000 UTC" firstStartedPulling="2026-01-29 13:32:05.536029257 +0000 UTC m=+964.297289533" lastFinishedPulling="2026-01-29 13:32:13.777306405 +0000 UTC m=+972.538566681" observedRunningTime="2026-01-29 13:32:14.723814287 +0000 UTC m=+973.485074573" watchObservedRunningTime="2026-01-29 13:32:14.724141666 +0000 UTC m=+973.485401952" Jan 29 13:32:15 crc kubenswrapper[4787]: I0129 13:32:15.359061 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-94cfr" Jan 29 13:32:15 crc kubenswrapper[4787]: I0129 13:32:15.721086 4787 generic.go:334] "Generic (PLEG): container finished" podID="499c781e-42ec-4475-ab1e-d5204f4bdac4" containerID="ffbea23fee1fb8a5bb7927472ef3c5315b29e1c4937f1d26cc6c3e3e822fbf69" exitCode=0 Jan 29 13:32:15 crc kubenswrapper[4787]: I0129 13:32:15.721163 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerDied","Data":"ffbea23fee1fb8a5bb7927472ef3c5315b29e1c4937f1d26cc6c3e3e822fbf69"} Jan 29 13:32:16 crc kubenswrapper[4787]: I0129 13:32:16.732324 4787 generic.go:334] "Generic (PLEG): container finished" podID="499c781e-42ec-4475-ab1e-d5204f4bdac4" containerID="080ac09140613cd31fed670ab03c510ea6c5f9912ccb8ce1ee897371cc15840f" exitCode=0 Jan 29 13:32:16 crc kubenswrapper[4787]: I0129 13:32:16.735124 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerDied","Data":"080ac09140613cd31fed670ab03c510ea6c5f9912ccb8ce1ee897371cc15840f"} Jan 29 13:32:17 crc kubenswrapper[4787]: I0129 13:32:17.747109 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerStarted","Data":"cad0947fbfbb3e00f7ec059de28b0e99ba04b4179a4a62a5de975c0b022c337e"} Jan 29 13:32:17 crc kubenswrapper[4787]: I0129 13:32:17.747655 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerStarted","Data":"8d3098168240e525d1831f7d49a6dcd1b731e2769ff0f3f62a9a113636877126"} Jan 29 13:32:17 crc kubenswrapper[4787]: I0129 13:32:17.747669 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerStarted","Data":"aef03a5c331e8ebead8a4a13401f8c9962c4f04d43684d74af81a6b1bfc8954c"} Jan 29 13:32:17 crc kubenswrapper[4787]: I0129 13:32:17.747678 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerStarted","Data":"dea617b3819c6d4e3656f4e83722cda870458d4b215560176e2da70b5ed3478b"} Jan 29 13:32:17 crc kubenswrapper[4787]: I0129 13:32:17.747687 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" 
event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerStarted","Data":"75b3718ed5f8d0d21b5388b0ee69b9bc07ca17fd5615e84207fec5a686b029df"} Jan 29 13:32:18 crc kubenswrapper[4787]: I0129 13:32:18.764859 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8nc6k" event={"ID":"499c781e-42ec-4475-ab1e-d5204f4bdac4","Type":"ContainerStarted","Data":"5b8f533c80818e5507b495cb852fb3284adfa5271e926fdb9ddfec639b63e424"} Jan 29 13:32:18 crc kubenswrapper[4787]: I0129 13:32:18.766618 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:18 crc kubenswrapper[4787]: I0129 13:32:18.813737 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-8nc6k" podStartSLOduration=6.999129916 podStartE2EDuration="14.813699121s" podCreationTimestamp="2026-01-29 13:32:04 +0000 UTC" firstStartedPulling="2026-01-29 13:32:05.937869182 +0000 UTC m=+964.699129458" lastFinishedPulling="2026-01-29 13:32:13.752438377 +0000 UTC m=+972.513698663" observedRunningTime="2026-01-29 13:32:18.803433417 +0000 UTC m=+977.564693723" watchObservedRunningTime="2026-01-29 13:32:18.813699121 +0000 UTC m=+977.574959437" Jan 29 13:32:20 crc kubenswrapper[4787]: I0129 13:32:20.807378 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:20 crc kubenswrapper[4787]: I0129 13:32:20.848689 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:25 crc kubenswrapper[4787]: I0129 13:32:25.230913 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-l82t9" Jan 29 13:32:26 crc kubenswrapper[4787]: I0129 13:32:26.851248 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-ntsqx" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.228103 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb"] Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.229574 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.231883 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.244666 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb"] Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.284854 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w24fx\" (UniqueName: \"kubernetes.io/projected/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-kube-api-access-w24fx\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.285004 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.285045 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.386380 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.386497 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w24fx\" (UniqueName: \"kubernetes.io/projected/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-kube-api-access-w24fx\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.386538 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.387039 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.387170 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.419231 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w24fx\" (UniqueName: \"kubernetes.io/projected/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-kube-api-access-w24fx\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.549475 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:28 crc kubenswrapper[4787]: I0129 13:32:28.989800 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb"] Jan 29 13:32:29 crc kubenswrapper[4787]: I0129 13:32:29.864589 4787 generic.go:334] "Generic (PLEG): container finished" podID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerID="d4d8b418c1477ec48c8c1eb0eef5c94828593b37a932bd2ef0a250a1996e675d" exitCode=0 Jan 29 13:32:29 crc kubenswrapper[4787]: I0129 13:32:29.864811 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" event={"ID":"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22","Type":"ContainerDied","Data":"d4d8b418c1477ec48c8c1eb0eef5c94828593b37a932bd2ef0a250a1996e675d"} Jan 29 13:32:29 crc kubenswrapper[4787]: I0129 13:32:29.865160 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" event={"ID":"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22","Type":"ContainerStarted","Data":"6b74f267012bf1e17a9e389be4261f4499f2398e8a7da1df776b331d7e74210d"} Jan 29 13:32:33 crc kubenswrapper[4787]: I0129 13:32:33.896939 4787 generic.go:334] "Generic (PLEG): container finished" podID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerID="5592e5c5764bd45c421ff97afb9d2bf84e4970aef2fbe059e32330718829981b" exitCode=0 Jan 29 13:32:33 crc kubenswrapper[4787]: I0129 13:32:33.897064 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" event={"ID":"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22","Type":"ContainerDied","Data":"5592e5c5764bd45c421ff97afb9d2bf84e4970aef2fbe059e32330718829981b"} Jan 29 13:32:34 crc kubenswrapper[4787]: I0129 13:32:34.909334 4787 generic.go:334] "Generic (PLEG): container finished" podID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerID="549af3c699890d6831332ff0ff520ea0c9a7a76faa12a882711ec5b7cd21e812" exitCode=0 Jan 29 13:32:34 crc kubenswrapper[4787]: I0129 
13:32:34.909419 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" event={"ID":"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22","Type":"ContainerDied","Data":"549af3c699890d6831332ff0ff520ea0c9a7a76faa12a882711ec5b7cd21e812"} Jan 29 13:32:35 crc kubenswrapper[4787]: I0129 13:32:35.817387 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-8nc6k" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.194429 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.306331 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-util\") pod \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.306483 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-bundle\") pod \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.307566 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-bundle" (OuterVolumeSpecName: "bundle") pod "2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" (UID: "2bed7c3e-80c2-41f8-8cb0-58f15efb7a22"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.307637 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w24fx\" (UniqueName: \"kubernetes.io/projected/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-kube-api-access-w24fx\") pod \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\" (UID: \"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22\") " Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.308630 4787 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.314941 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-kube-api-access-w24fx" (OuterVolumeSpecName: "kube-api-access-w24fx") pod "2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" (UID: "2bed7c3e-80c2-41f8-8cb0-58f15efb7a22"). InnerVolumeSpecName "kube-api-access-w24fx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.331898 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-util" (OuterVolumeSpecName: "util") pod "2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" (UID: "2bed7c3e-80c2-41f8-8cb0-58f15efb7a22"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.409423 4787 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-util\") on node \"crc\" DevicePath \"\"" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.409481 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w24fx\" (UniqueName: \"kubernetes.io/projected/2bed7c3e-80c2-41f8-8cb0-58f15efb7a22-kube-api-access-w24fx\") on node \"crc\" DevicePath \"\"" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.927982 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" event={"ID":"2bed7c3e-80c2-41f8-8cb0-58f15efb7a22","Type":"ContainerDied","Data":"6b74f267012bf1e17a9e389be4261f4499f2398e8a7da1df776b331d7e74210d"} Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.928053 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b74f267012bf1e17a9e389be4261f4499f2398e8a7da1df776b331d7e74210d" Jan 29 13:32:36 crc kubenswrapper[4787]: I0129 13:32:36.928174 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.284889 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp"] Jan 29 13:32:41 crc kubenswrapper[4787]: E0129 13:32:41.286265 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerName="extract" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.286294 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerName="extract" Jan 29 13:32:41 crc kubenswrapper[4787]: E0129 13:32:41.286316 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerName="pull" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.286327 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerName="pull" Jan 29 13:32:41 crc kubenswrapper[4787]: E0129 13:32:41.286351 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerName="util" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.286361 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerName="util" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.286599 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bed7c3e-80c2-41f8-8cb0-58f15efb7a22" containerName="extract" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.287261 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.291397 4787 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-n88gf" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.292186 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.293303 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.310150 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp"] Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.387623 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/357c093a-cad3-43e4-8c78-672222e2eb94-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-2f9sp\" (UID: \"357c093a-cad3-43e4-8c78-672222e2eb94\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.387859 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwtvx\" (UniqueName: \"kubernetes.io/projected/357c093a-cad3-43e4-8c78-672222e2eb94-kube-api-access-zwtvx\") pod \"cert-manager-operator-controller-manager-66c8bdd694-2f9sp\" (UID: \"357c093a-cad3-43e4-8c78-672222e2eb94\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.489314 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/357c093a-cad3-43e4-8c78-672222e2eb94-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-2f9sp\" (UID: \"357c093a-cad3-43e4-8c78-672222e2eb94\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.489386 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwtvx\" (UniqueName: \"kubernetes.io/projected/357c093a-cad3-43e4-8c78-672222e2eb94-kube-api-access-zwtvx\") pod \"cert-manager-operator-controller-manager-66c8bdd694-2f9sp\" (UID: \"357c093a-cad3-43e4-8c78-672222e2eb94\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.490123 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/357c093a-cad3-43e4-8c78-672222e2eb94-tmp\") pod \"cert-manager-operator-controller-manager-66c8bdd694-2f9sp\" (UID: \"357c093a-cad3-43e4-8c78-672222e2eb94\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.525765 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwtvx\" (UniqueName: \"kubernetes.io/projected/357c093a-cad3-43e4-8c78-672222e2eb94-kube-api-access-zwtvx\") pod \"cert-manager-operator-controller-manager-66c8bdd694-2f9sp\" (UID: \"357c093a-cad3-43e4-8c78-672222e2eb94\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.606540 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" Jan 29 13:32:41 crc kubenswrapper[4787]: I0129 13:32:41.992183 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp"] Jan 29 13:32:41 crc kubenswrapper[4787]: W0129 13:32:41.997912 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod357c093a_cad3_43e4_8c78_672222e2eb94.slice/crio-f3e81696232e9ea501b5eb60c24ee311189791fb3b557ea3952a1af5876a3f3e WatchSource:0}: Error finding container f3e81696232e9ea501b5eb60c24ee311189791fb3b557ea3952a1af5876a3f3e: Status 404 returned error can't find the container with id f3e81696232e9ea501b5eb60c24ee311189791fb3b557ea3952a1af5876a3f3e Jan 29 13:32:42 crc kubenswrapper[4787]: I0129 13:32:42.978161 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" event={"ID":"357c093a-cad3-43e4-8c78-672222e2eb94","Type":"ContainerStarted","Data":"f3e81696232e9ea501b5eb60c24ee311189791fb3b557ea3952a1af5876a3f3e"} Jan 29 13:32:46 crc kubenswrapper[4787]: I0129 13:32:45.999659 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" event={"ID":"357c093a-cad3-43e4-8c78-672222e2eb94","Type":"ContainerStarted","Data":"200202a6c24177eccc29fdcd9cd11772bc1990f7695f75a5c6a8416f2f775c72"} Jan 29 13:32:46 crc kubenswrapper[4787]: I0129 13:32:46.030478 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-66c8bdd694-2f9sp" podStartSLOduration=2.041453594 podStartE2EDuration="5.030444885s" podCreationTimestamp="2026-01-29 13:32:41 +0000 UTC" firstStartedPulling="2026-01-29 13:32:42.002574947 +0000 UTC m=+1000.763835213" lastFinishedPulling="2026-01-29 13:32:44.991566228 +0000 UTC m=+1003.752826504" observedRunningTime="2026-01-29 13:32:46.028472471 +0000 UTC m=+1004.789732747" watchObservedRunningTime="2026-01-29 13:32:46.030444885 +0000 UTC m=+1004.791705161" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.716870 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-rnnjn"] Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.718369 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.720768 4787 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-nzp4m" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.722383 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.722560 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.726734 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-rnnjn"] Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.805604 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdwfs\" (UniqueName: \"kubernetes.io/projected/aa78f35d-c744-44e3-ae55-363c3f891fb9-kube-api-access-cdwfs\") pod \"cert-manager-webhook-6888856db4-rnnjn\" (UID: \"aa78f35d-c744-44e3-ae55-363c3f891fb9\") " pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.805678 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa78f35d-c744-44e3-ae55-363c3f891fb9-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-rnnjn\" (UID: \"aa78f35d-c744-44e3-ae55-363c3f891fb9\") " pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.907195 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdwfs\" (UniqueName: \"kubernetes.io/projected/aa78f35d-c744-44e3-ae55-363c3f891fb9-kube-api-access-cdwfs\") pod \"cert-manager-webhook-6888856db4-rnnjn\" (UID: \"aa78f35d-c744-44e3-ae55-363c3f891fb9\") " pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.907293 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa78f35d-c744-44e3-ae55-363c3f891fb9-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-rnnjn\" (UID: \"aa78f35d-c744-44e3-ae55-363c3f891fb9\") " pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.949294 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa78f35d-c744-44e3-ae55-363c3f891fb9-bound-sa-token\") pod \"cert-manager-webhook-6888856db4-rnnjn\" (UID: \"aa78f35d-c744-44e3-ae55-363c3f891fb9\") " pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:48 crc kubenswrapper[4787]: I0129 13:32:48.949550 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdwfs\" (UniqueName: \"kubernetes.io/projected/aa78f35d-c744-44e3-ae55-363c3f891fb9-kube-api-access-cdwfs\") pod \"cert-manager-webhook-6888856db4-rnnjn\" (UID: \"aa78f35d-c744-44e3-ae55-363c3f891fb9\") " pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.035241 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.258848 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-6888856db4-rnnjn"] Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.376605 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5kvcj"] Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.378644 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.414216 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-utilities\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.414293 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-catalog-content\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.414448 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z886c\" (UniqueName: \"kubernetes.io/projected/7e2e4b21-f56a-42a5-bf16-a356f575a88b-kube-api-access-z886c\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.416590 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5kvcj"] Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.515500 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z886c\" (UniqueName: \"kubernetes.io/projected/7e2e4b21-f56a-42a5-bf16-a356f575a88b-kube-api-access-z886c\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.515612 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-utilities\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.515665 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-catalog-content\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.516260 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-utilities\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") 
" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.516293 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-catalog-content\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.536616 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z886c\" (UniqueName: \"kubernetes.io/projected/7e2e4b21-f56a-42a5-bf16-a356f575a88b-kube-api-access-z886c\") pod \"community-operators-5kvcj\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.702547 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:49 crc kubenswrapper[4787]: I0129 13:32:49.936296 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5kvcj"] Jan 29 13:32:50 crc kubenswrapper[4787]: I0129 13:32:50.047160 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kvcj" event={"ID":"7e2e4b21-f56a-42a5-bf16-a356f575a88b","Type":"ContainerStarted","Data":"58a60ea4f185bf0b0b4aa897388a4fecbee26283d72f8948cee792de1ed49684"} Jan 29 13:32:50 crc kubenswrapper[4787]: I0129 13:32:50.048448 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" event={"ID":"aa78f35d-c744-44e3-ae55-363c3f891fb9","Type":"ContainerStarted","Data":"6307f34e812f2f6e616b2a6560996549f1ea7b942dda17b343e1e33a20b535ee"} Jan 29 13:32:51 crc kubenswrapper[4787]: I0129 13:32:51.065484 4787 generic.go:334] "Generic (PLEG): container finished" podID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerID="33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce" exitCode=0 Jan 29 13:32:51 crc kubenswrapper[4787]: I0129 13:32:51.065599 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kvcj" event={"ID":"7e2e4b21-f56a-42a5-bf16-a356f575a88b","Type":"ContainerDied","Data":"33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce"} Jan 29 13:32:52 crc kubenswrapper[4787]: I0129 13:32:52.075324 4787 generic.go:334] "Generic (PLEG): container finished" podID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerID="3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18" exitCode=0 Jan 29 13:32:52 crc kubenswrapper[4787]: I0129 13:32:52.075471 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kvcj" event={"ID":"7e2e4b21-f56a-42a5-bf16-a356f575a88b","Type":"ContainerDied","Data":"3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18"} Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.027073 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-2dtz4"] Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.028347 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.030263 4787 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-xw9sk" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.072092 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-2dtz4"] Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.097692 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" event={"ID":"aa78f35d-c744-44e3-ae55-363c3f891fb9","Type":"ContainerStarted","Data":"b255e0406384d655a022b1164381f88cafa785fbe7b13311077ded7b7901156d"} Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.098050 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.101678 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kvcj" event={"ID":"7e2e4b21-f56a-42a5-bf16-a356f575a88b","Type":"ContainerStarted","Data":"3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd"} Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.116987 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" podStartSLOduration=2.342766592 podStartE2EDuration="7.116963344s" podCreationTimestamp="2026-01-29 13:32:48 +0000 UTC" firstStartedPulling="2026-01-29 13:32:49.285429614 +0000 UTC m=+1008.046689890" lastFinishedPulling="2026-01-29 13:32:54.059626366 +0000 UTC m=+1012.820886642" observedRunningTime="2026-01-29 13:32:55.113317954 +0000 UTC m=+1013.874578250" watchObservedRunningTime="2026-01-29 13:32:55.116963344 +0000 UTC m=+1013.878223620" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.144335 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5kvcj" podStartSLOduration=3.168089213 podStartE2EDuration="6.144306881s" podCreationTimestamp="2026-01-29 13:32:49 +0000 UTC" firstStartedPulling="2026-01-29 13:32:51.069475753 +0000 UTC m=+1009.830736019" lastFinishedPulling="2026-01-29 13:32:54.045693411 +0000 UTC m=+1012.806953687" observedRunningTime="2026-01-29 13:32:55.140149476 +0000 UTC m=+1013.901409752" watchObservedRunningTime="2026-01-29 13:32:55.144306881 +0000 UTC m=+1013.905567157" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.219507 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6a3f740-0b80-4393-b901-8058c12d3bcc-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-2dtz4\" (UID: \"e6a3f740-0b80-4393-b901-8058c12d3bcc\") " pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.219643 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pm2t\" (UniqueName: \"kubernetes.io/projected/e6a3f740-0b80-4393-b901-8058c12d3bcc-kube-api-access-2pm2t\") pod \"cert-manager-cainjector-5545bd876-2dtz4\" (UID: \"e6a3f740-0b80-4393-b901-8058c12d3bcc\") " pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.321114 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6a3f740-0b80-4393-b901-8058c12d3bcc-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-2dtz4\" (UID: \"e6a3f740-0b80-4393-b901-8058c12d3bcc\") " pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.321226 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pm2t\" (UniqueName: \"kubernetes.io/projected/e6a3f740-0b80-4393-b901-8058c12d3bcc-kube-api-access-2pm2t\") pod \"cert-manager-cainjector-5545bd876-2dtz4\" (UID: \"e6a3f740-0b80-4393-b901-8058c12d3bcc\") " pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.344120 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6a3f740-0b80-4393-b901-8058c12d3bcc-bound-sa-token\") pod \"cert-manager-cainjector-5545bd876-2dtz4\" (UID: \"e6a3f740-0b80-4393-b901-8058c12d3bcc\") " pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.347260 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pm2t\" (UniqueName: \"kubernetes.io/projected/e6a3f740-0b80-4393-b901-8058c12d3bcc-kube-api-access-2pm2t\") pod \"cert-manager-cainjector-5545bd876-2dtz4\" (UID: \"e6a3f740-0b80-4393-b901-8058c12d3bcc\") " pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.645630 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" Jan 29 13:32:55 crc kubenswrapper[4787]: I0129 13:32:55.914287 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-5545bd876-2dtz4"] Jan 29 13:32:56 crc kubenswrapper[4787]: I0129 13:32:56.110162 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" event={"ID":"e6a3f740-0b80-4393-b901-8058c12d3bcc","Type":"ContainerStarted","Data":"16991155eed7d29441dc1dc24cc0492a7a0b7dbe66994f67c965ef6de53649eb"} Jan 29 13:32:56 crc kubenswrapper[4787]: I0129 13:32:56.110230 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" event={"ID":"e6a3f740-0b80-4393-b901-8058c12d3bcc","Type":"ContainerStarted","Data":"81a11056f7eff950bcab4eeaee15fce0801c2c511b66bf186f6f5e42fdfe823b"} Jan 29 13:32:56 crc kubenswrapper[4787]: I0129 13:32:56.129778 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-5545bd876-2dtz4" podStartSLOduration=1.12976043 podStartE2EDuration="1.12976043s" podCreationTimestamp="2026-01-29 13:32:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:32:56.129351289 +0000 UTC m=+1014.890611565" watchObservedRunningTime="2026-01-29 13:32:56.12976043 +0000 UTC m=+1014.891020706" Jan 29 13:32:57 crc kubenswrapper[4787]: I0129 13:32:57.908367 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-545d4d4674-ks2h5"] Jan 29 13:32:57 crc kubenswrapper[4787]: I0129 13:32:57.910152 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-545d4d4674-ks2h5" Jan 29 13:32:57 crc kubenswrapper[4787]: I0129 13:32:57.912721 4787 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-9j8l5" Jan 29 13:32:57 crc kubenswrapper[4787]: I0129 13:32:57.927081 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-545d4d4674-ks2h5"] Jan 29 13:32:58 crc kubenswrapper[4787]: I0129 13:32:58.065697 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t822v\" (UniqueName: \"kubernetes.io/projected/3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0-kube-api-access-t822v\") pod \"cert-manager-545d4d4674-ks2h5\" (UID: \"3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0\") " pod="cert-manager/cert-manager-545d4d4674-ks2h5" Jan 29 13:32:58 crc kubenswrapper[4787]: I0129 13:32:58.065875 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0-bound-sa-token\") pod \"cert-manager-545d4d4674-ks2h5\" (UID: \"3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0\") " pod="cert-manager/cert-manager-545d4d4674-ks2h5" Jan 29 13:32:58 crc kubenswrapper[4787]: I0129 13:32:58.167635 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0-bound-sa-token\") pod \"cert-manager-545d4d4674-ks2h5\" (UID: \"3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0\") " pod="cert-manager/cert-manager-545d4d4674-ks2h5" Jan 29 13:32:58 crc kubenswrapper[4787]: I0129 13:32:58.167761 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t822v\" (UniqueName: \"kubernetes.io/projected/3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0-kube-api-access-t822v\") pod \"cert-manager-545d4d4674-ks2h5\" (UID: \"3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0\") " pod="cert-manager/cert-manager-545d4d4674-ks2h5" Jan 29 13:32:58 crc kubenswrapper[4787]: I0129 13:32:58.207271 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0-bound-sa-token\") pod \"cert-manager-545d4d4674-ks2h5\" (UID: \"3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0\") " pod="cert-manager/cert-manager-545d4d4674-ks2h5" Jan 29 13:32:58 crc kubenswrapper[4787]: I0129 13:32:58.209352 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t822v\" (UniqueName: \"kubernetes.io/projected/3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0-kube-api-access-t822v\") pod \"cert-manager-545d4d4674-ks2h5\" (UID: \"3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0\") " pod="cert-manager/cert-manager-545d4d4674-ks2h5" Jan 29 13:32:58 crc kubenswrapper[4787]: I0129 13:32:58.231593 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-545d4d4674-ks2h5" Jan 29 13:32:58 crc kubenswrapper[4787]: I0129 13:32:58.783666 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-545d4d4674-ks2h5"] Jan 29 13:32:58 crc kubenswrapper[4787]: W0129 13:32:58.801698 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3689cb82_2282_4fd1_bd6e_bbcff6e1f8f0.slice/crio-9efa5a603decf11985e5a29b9780f5ceb60b94b3ca9ed7b65f2bff1d127de98b WatchSource:0}: Error finding container 9efa5a603decf11985e5a29b9780f5ceb60b94b3ca9ed7b65f2bff1d127de98b: Status 404 returned error can't find the container with id 9efa5a603decf11985e5a29b9780f5ceb60b94b3ca9ed7b65f2bff1d127de98b Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.039013 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-6888856db4-rnnjn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.133802 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-545d4d4674-ks2h5" event={"ID":"3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0","Type":"ContainerStarted","Data":"5775566d13a401ed1b7871408ce963ca6c664ffdd902528f0928a6b2455b730d"} Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.133865 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-545d4d4674-ks2h5" event={"ID":"3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0","Type":"ContainerStarted","Data":"9efa5a603decf11985e5a29b9780f5ceb60b94b3ca9ed7b65f2bff1d127de98b"} Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.155295 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-545d4d4674-ks2h5" podStartSLOduration=2.155270041 podStartE2EDuration="2.155270041s" podCreationTimestamp="2026-01-29 13:32:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:32:59.152370411 +0000 UTC m=+1017.913630697" watchObservedRunningTime="2026-01-29 13:32:59.155270041 +0000 UTC m=+1017.916530327" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.368086 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-262wn"] Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.378773 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.398485 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-262wn"] Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.491028 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnf67\" (UniqueName: \"kubernetes.io/projected/9fdbe96b-3dda-41c1-9096-089c1e835080-kube-api-access-fnf67\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.491111 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-catalog-content\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.491180 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-utilities\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.593596 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnf67\" (UniqueName: \"kubernetes.io/projected/9fdbe96b-3dda-41c1-9096-089c1e835080-kube-api-access-fnf67\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.593712 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-catalog-content\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.593775 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-utilities\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.594286 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-catalog-content\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.594580 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-utilities\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.628433 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fnf67\" (UniqueName: \"kubernetes.io/projected/9fdbe96b-3dda-41c1-9096-089c1e835080-kube-api-access-fnf67\") pod \"certified-operators-262wn\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.703226 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.703286 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.704242 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:32:59 crc kubenswrapper[4787]: I0129 13:32:59.790036 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:33:00 crc kubenswrapper[4787]: I0129 13:33:00.026962 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-262wn"] Jan 29 13:33:00 crc kubenswrapper[4787]: I0129 13:33:00.140516 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-262wn" event={"ID":"9fdbe96b-3dda-41c1-9096-089c1e835080","Type":"ContainerStarted","Data":"adc71948062e31c236407172dd6d57b1c223844ba4e07ee4bbe5844c59821714"} Jan 29 13:33:00 crc kubenswrapper[4787]: I0129 13:33:00.213816 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:33:01 crc kubenswrapper[4787]: I0129 13:33:01.148351 4787 generic.go:334] "Generic (PLEG): container finished" podID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerID="8e3230cd5f9aa22d579a5b708696978d2fe691dff201f890fa81185af59f635e" exitCode=0 Jan 29 13:33:01 crc kubenswrapper[4787]: I0129 13:33:01.148417 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-262wn" event={"ID":"9fdbe96b-3dda-41c1-9096-089c1e835080","Type":"ContainerDied","Data":"8e3230cd5f9aa22d579a5b708696978d2fe691dff201f890fa81185af59f635e"} Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.089005 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5kvcj"] Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.157965 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-262wn" event={"ID":"9fdbe96b-3dda-41c1-9096-089c1e835080","Type":"ContainerStarted","Data":"44775f4018d439eb4a02fe01a4ee47c87120861ba2b9cbd158b75cb61677eb9b"} Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.158480 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5kvcj" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerName="registry-server" containerID="cri-o://3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd" gracePeriod=2 Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.551521 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.647886 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-catalog-content\") pod \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.648036 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-utilities\") pod \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.648187 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z886c\" (UniqueName: \"kubernetes.io/projected/7e2e4b21-f56a-42a5-bf16-a356f575a88b-kube-api-access-z886c\") pod \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\" (UID: \"7e2e4b21-f56a-42a5-bf16-a356f575a88b\") " Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.649053 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-utilities" (OuterVolumeSpecName: "utilities") pod "7e2e4b21-f56a-42a5-bf16-a356f575a88b" (UID: "7e2e4b21-f56a-42a5-bf16-a356f575a88b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.656850 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e2e4b21-f56a-42a5-bf16-a356f575a88b-kube-api-access-z886c" (OuterVolumeSpecName: "kube-api-access-z886c") pod "7e2e4b21-f56a-42a5-bf16-a356f575a88b" (UID: "7e2e4b21-f56a-42a5-bf16-a356f575a88b"). InnerVolumeSpecName "kube-api-access-z886c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.704568 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e2e4b21-f56a-42a5-bf16-a356f575a88b" (UID: "7e2e4b21-f56a-42a5-bf16-a356f575a88b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.751627 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.751673 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z886c\" (UniqueName: \"kubernetes.io/projected/7e2e4b21-f56a-42a5-bf16-a356f575a88b-kube-api-access-z886c\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:02 crc kubenswrapper[4787]: I0129 13:33:02.751686 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e2e4b21-f56a-42a5-bf16-a356f575a88b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.168847 4787 generic.go:334] "Generic (PLEG): container finished" podID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerID="3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd" exitCode=0 Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.168960 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kvcj" event={"ID":"7e2e4b21-f56a-42a5-bf16-a356f575a88b","Type":"ContainerDied","Data":"3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd"} Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.169498 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5kvcj" event={"ID":"7e2e4b21-f56a-42a5-bf16-a356f575a88b","Type":"ContainerDied","Data":"58a60ea4f185bf0b0b4aa897388a4fecbee26283d72f8948cee792de1ed49684"} Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.169553 4787 scope.go:117] "RemoveContainer" containerID="3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.169102 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5kvcj" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.173428 4787 generic.go:334] "Generic (PLEG): container finished" podID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerID="44775f4018d439eb4a02fe01a4ee47c87120861ba2b9cbd158b75cb61677eb9b" exitCode=0 Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.173661 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-262wn" event={"ID":"9fdbe96b-3dda-41c1-9096-089c1e835080","Type":"ContainerDied","Data":"44775f4018d439eb4a02fe01a4ee47c87120861ba2b9cbd158b75cb61677eb9b"} Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.196951 4787 scope.go:117] "RemoveContainer" containerID="3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.224613 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5kvcj"] Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.234712 4787 scope.go:117] "RemoveContainer" containerID="33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.236840 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5kvcj"] Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.260968 4787 scope.go:117] "RemoveContainer" containerID="3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd" Jan 29 13:33:03 crc kubenswrapper[4787]: E0129 13:33:03.261668 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd\": container with ID starting with 3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd not found: ID does not exist" containerID="3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.261731 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd"} err="failed to get container status \"3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd\": rpc error: code = NotFound desc = could not find container \"3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd\": container with ID starting with 3770884c0604c670f18da720615e7934899e8829394b8f78b4b275525f5e8ffd not found: ID does not exist" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.261774 4787 scope.go:117] "RemoveContainer" containerID="3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18" Jan 29 13:33:03 crc kubenswrapper[4787]: E0129 13:33:03.262331 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18\": container with ID starting with 3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18 not found: ID does not exist" containerID="3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.262357 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18"} err="failed to get container status 
\"3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18\": rpc error: code = NotFound desc = could not find container \"3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18\": container with ID starting with 3581011fb4666b97ce992ab15274fa2fbf24fac8564d7e9f369d6333268ccb18 not found: ID does not exist" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.262376 4787 scope.go:117] "RemoveContainer" containerID="33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce" Jan 29 13:33:03 crc kubenswrapper[4787]: E0129 13:33:03.262608 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce\": container with ID starting with 33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce not found: ID does not exist" containerID="33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.262632 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce"} err="failed to get container status \"33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce\": rpc error: code = NotFound desc = could not find container \"33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce\": container with ID starting with 33280a7fb79a390d06ae6d419cbd2132118b2f5840e8ea4209d4c15c8b53f9ce not found: ID does not exist" Jan 29 13:33:03 crc kubenswrapper[4787]: I0129 13:33:03.994734 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" path="/var/lib/kubelet/pods/7e2e4b21-f56a-42a5-bf16-a356f575a88b/volumes" Jan 29 13:33:04 crc kubenswrapper[4787]: I0129 13:33:04.191702 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-262wn" event={"ID":"9fdbe96b-3dda-41c1-9096-089c1e835080","Type":"ContainerStarted","Data":"49d3cc0a971c6a5fd706919656909fff76b0ebd96b5856300de130b4dc0eea90"} Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.904977 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-262wn" podStartSLOduration=5.474742487 podStartE2EDuration="7.904945611s" podCreationTimestamp="2026-01-29 13:32:59 +0000 UTC" firstStartedPulling="2026-01-29 13:33:01.152051336 +0000 UTC m=+1019.913311632" lastFinishedPulling="2026-01-29 13:33:03.58225448 +0000 UTC m=+1022.343514756" observedRunningTime="2026-01-29 13:33:04.217722428 +0000 UTC m=+1022.978982714" watchObservedRunningTime="2026-01-29 13:33:06.904945611 +0000 UTC m=+1025.666205887" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.908228 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-m6lp4"] Jan 29 13:33:06 crc kubenswrapper[4787]: E0129 13:33:06.908560 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerName="extract-utilities" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.908580 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerName="extract-utilities" Jan 29 13:33:06 crc kubenswrapper[4787]: E0129 13:33:06.908599 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerName="extract-content" Jan 
29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.908608 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerName="extract-content" Jan 29 13:33:06 crc kubenswrapper[4787]: E0129 13:33:06.908617 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerName="registry-server" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.908622 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerName="registry-server" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.908748 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e2e4b21-f56a-42a5-bf16-a356f575a88b" containerName="registry-server" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.909252 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.913390 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.913404 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-477g8" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.913617 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 29 13:33:06 crc kubenswrapper[4787]: I0129 13:33:06.924642 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-m6lp4"] Jan 29 13:33:07 crc kubenswrapper[4787]: I0129 13:33:07.018420 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wdzz\" (UniqueName: \"kubernetes.io/projected/cd1f5137-b9a8-4b27-baa6-f7117852d1fc-kube-api-access-7wdzz\") pod \"openstack-operator-index-m6lp4\" (UID: \"cd1f5137-b9a8-4b27-baa6-f7117852d1fc\") " pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:07 crc kubenswrapper[4787]: I0129 13:33:07.120029 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wdzz\" (UniqueName: \"kubernetes.io/projected/cd1f5137-b9a8-4b27-baa6-f7117852d1fc-kube-api-access-7wdzz\") pod \"openstack-operator-index-m6lp4\" (UID: \"cd1f5137-b9a8-4b27-baa6-f7117852d1fc\") " pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:07 crc kubenswrapper[4787]: I0129 13:33:07.154665 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wdzz\" (UniqueName: \"kubernetes.io/projected/cd1f5137-b9a8-4b27-baa6-f7117852d1fc-kube-api-access-7wdzz\") pod \"openstack-operator-index-m6lp4\" (UID: \"cd1f5137-b9a8-4b27-baa6-f7117852d1fc\") " pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:07 crc kubenswrapper[4787]: I0129 13:33:07.233762 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:07 crc kubenswrapper[4787]: I0129 13:33:07.449184 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-m6lp4"] Jan 29 13:33:07 crc kubenswrapper[4787]: W0129 13:33:07.462420 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd1f5137_b9a8_4b27_baa6_f7117852d1fc.slice/crio-fb56aad1804fc7824e3d01fe56ed1329811803b61e8b12289266c2178c11dc9d WatchSource:0}: Error finding container fb56aad1804fc7824e3d01fe56ed1329811803b61e8b12289266c2178c11dc9d: Status 404 returned error can't find the container with id fb56aad1804fc7824e3d01fe56ed1329811803b61e8b12289266c2178c11dc9d Jan 29 13:33:08 crc kubenswrapper[4787]: I0129 13:33:08.225281 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-m6lp4" event={"ID":"cd1f5137-b9a8-4b27-baa6-f7117852d1fc","Type":"ContainerStarted","Data":"fb56aad1804fc7824e3d01fe56ed1329811803b61e8b12289266c2178c11dc9d"} Jan 29 13:33:09 crc kubenswrapper[4787]: I0129 13:33:09.238412 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-m6lp4" event={"ID":"cd1f5137-b9a8-4b27-baa6-f7117852d1fc","Type":"ContainerStarted","Data":"419402f16cf7912d009212e69e0285a396f8883819bc7ac6ca811eef291fe0f4"} Jan 29 13:33:09 crc kubenswrapper[4787]: I0129 13:33:09.266684 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-m6lp4" podStartSLOduration=2.385257649 podStartE2EDuration="3.26665339s" podCreationTimestamp="2026-01-29 13:33:06 +0000 UTC" firstStartedPulling="2026-01-29 13:33:07.464697185 +0000 UTC m=+1026.225957471" lastFinishedPulling="2026-01-29 13:33:08.346092896 +0000 UTC m=+1027.107353212" observedRunningTime="2026-01-29 13:33:09.259216075 +0000 UTC m=+1028.020476361" watchObservedRunningTime="2026-01-29 13:33:09.26665339 +0000 UTC m=+1028.027913706" Jan 29 13:33:09 crc kubenswrapper[4787]: I0129 13:33:09.705026 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:33:09 crc kubenswrapper[4787]: I0129 13:33:09.705110 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:33:09 crc kubenswrapper[4787]: I0129 13:33:09.790290 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:33:10 crc kubenswrapper[4787]: I0129 13:33:10.318947 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.093178 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-262wn"] Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.094185 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-262wn" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerName="registry-server" containerID="cri-o://49d3cc0a971c6a5fd706919656909fff76b0ebd96b5856300de130b4dc0eea90" gracePeriod=2 Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.280443 4787 generic.go:334] "Generic (PLEG): container finished" 
podID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerID="49d3cc0a971c6a5fd706919656909fff76b0ebd96b5856300de130b4dc0eea90" exitCode=0 Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.280483 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-262wn" event={"ID":"9fdbe96b-3dda-41c1-9096-089c1e835080","Type":"ContainerDied","Data":"49d3cc0a971c6a5fd706919656909fff76b0ebd96b5856300de130b4dc0eea90"} Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.522242 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.643051 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-catalog-content\") pod \"9fdbe96b-3dda-41c1-9096-089c1e835080\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.643127 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnf67\" (UniqueName: \"kubernetes.io/projected/9fdbe96b-3dda-41c1-9096-089c1e835080-kube-api-access-fnf67\") pod \"9fdbe96b-3dda-41c1-9096-089c1e835080\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.643215 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-utilities\") pod \"9fdbe96b-3dda-41c1-9096-089c1e835080\" (UID: \"9fdbe96b-3dda-41c1-9096-089c1e835080\") " Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.644212 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-utilities" (OuterVolumeSpecName: "utilities") pod "9fdbe96b-3dda-41c1-9096-089c1e835080" (UID: "9fdbe96b-3dda-41c1-9096-089c1e835080"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.652640 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fdbe96b-3dda-41c1-9096-089c1e835080-kube-api-access-fnf67" (OuterVolumeSpecName: "kube-api-access-fnf67") pod "9fdbe96b-3dda-41c1-9096-089c1e835080" (UID: "9fdbe96b-3dda-41c1-9096-089c1e835080"). InnerVolumeSpecName "kube-api-access-fnf67". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.691256 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9fdbe96b-3dda-41c1-9096-089c1e835080" (UID: "9fdbe96b-3dda-41c1-9096-089c1e835080"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.745526 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.745569 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnf67\" (UniqueName: \"kubernetes.io/projected/9fdbe96b-3dda-41c1-9096-089c1e835080-kube-api-access-fnf67\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:14 crc kubenswrapper[4787]: I0129 13:33:14.745585 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fdbe96b-3dda-41c1-9096-089c1e835080-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:15 crc kubenswrapper[4787]: I0129 13:33:15.290978 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-262wn" event={"ID":"9fdbe96b-3dda-41c1-9096-089c1e835080","Type":"ContainerDied","Data":"adc71948062e31c236407172dd6d57b1c223844ba4e07ee4bbe5844c59821714"} Jan 29 13:33:15 crc kubenswrapper[4787]: I0129 13:33:15.291156 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-262wn" Jan 29 13:33:15 crc kubenswrapper[4787]: I0129 13:33:15.291892 4787 scope.go:117] "RemoveContainer" containerID="49d3cc0a971c6a5fd706919656909fff76b0ebd96b5856300de130b4dc0eea90" Jan 29 13:33:15 crc kubenswrapper[4787]: I0129 13:33:15.314046 4787 scope.go:117] "RemoveContainer" containerID="44775f4018d439eb4a02fe01a4ee47c87120861ba2b9cbd158b75cb61677eb9b" Jan 29 13:33:15 crc kubenswrapper[4787]: I0129 13:33:15.339250 4787 scope.go:117] "RemoveContainer" containerID="8e3230cd5f9aa22d579a5b708696978d2fe691dff201f890fa81185af59f635e" Jan 29 13:33:15 crc kubenswrapper[4787]: I0129 13:33:15.339409 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-262wn"] Jan 29 13:33:15 crc kubenswrapper[4787]: I0129 13:33:15.347570 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-262wn"] Jan 29 13:33:15 crc kubenswrapper[4787]: I0129 13:33:15.998954 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" path="/var/lib/kubelet/pods/9fdbe96b-3dda-41c1-9096-089c1e835080/volumes" Jan 29 13:33:17 crc kubenswrapper[4787]: I0129 13:33:17.235203 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:17 crc kubenswrapper[4787]: I0129 13:33:17.235870 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:17 crc kubenswrapper[4787]: I0129 13:33:17.281314 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:17 crc kubenswrapper[4787]: I0129 13:33:17.351584 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-m6lp4" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.563590 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg"] Jan 29 13:33:21 crc kubenswrapper[4787]: E0129 
13:33:21.566329 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerName="extract-content" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.566624 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerName="extract-content" Jan 29 13:33:21 crc kubenswrapper[4787]: E0129 13:33:21.566827 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerName="registry-server" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.566980 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerName="registry-server" Jan 29 13:33:21 crc kubenswrapper[4787]: E0129 13:33:21.567168 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerName="extract-utilities" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.567332 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerName="extract-utilities" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.567815 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fdbe96b-3dda-41c1-9096-089c1e835080" containerName="registry-server" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.569979 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.574006 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-qlqpm" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.576580 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg"] Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.661092 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-bundle\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.661185 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96lzs\" (UniqueName: \"kubernetes.io/projected/762c1b23-a057-48d8-a61b-bef88d9f588a-kube-api-access-96lzs\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.661664 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-util\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.764397 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"bundle\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-bundle\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.764594 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96lzs\" (UniqueName: \"kubernetes.io/projected/762c1b23-a057-48d8-a61b-bef88d9f588a-kube-api-access-96lzs\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.764691 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-util\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.765943 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-bundle\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.767410 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-util\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.802550 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96lzs\" (UniqueName: \"kubernetes.io/projected/762c1b23-a057-48d8-a61b-bef88d9f588a-kube-api-access-96lzs\") pod \"b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:21 crc kubenswrapper[4787]: I0129 13:33:21.899609 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:22 crc kubenswrapper[4787]: I0129 13:33:22.453009 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg"] Jan 29 13:33:23 crc kubenswrapper[4787]: I0129 13:33:23.364364 4787 generic.go:334] "Generic (PLEG): container finished" podID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerID="49d8d560c546f6ce3bd8d5b33da526f5977e0182906e3e283e3b0471934654d6" exitCode=0 Jan 29 13:33:23 crc kubenswrapper[4787]: I0129 13:33:23.364490 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" event={"ID":"762c1b23-a057-48d8-a61b-bef88d9f588a","Type":"ContainerDied","Data":"49d8d560c546f6ce3bd8d5b33da526f5977e0182906e3e283e3b0471934654d6"} Jan 29 13:33:23 crc kubenswrapper[4787]: I0129 13:33:23.366891 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" event={"ID":"762c1b23-a057-48d8-a61b-bef88d9f588a","Type":"ContainerStarted","Data":"c91b77328466da0f2b6c061362ad08568db8fe13c4f86f43e1466c8be83c5536"} Jan 29 13:33:24 crc kubenswrapper[4787]: I0129 13:33:24.378139 4787 generic.go:334] "Generic (PLEG): container finished" podID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerID="d5e55360486bcb2025e629ae7c239d7a053c3827c712da5189bdb89473433709" exitCode=0 Jan 29 13:33:24 crc kubenswrapper[4787]: I0129 13:33:24.378220 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" event={"ID":"762c1b23-a057-48d8-a61b-bef88d9f588a","Type":"ContainerDied","Data":"d5e55360486bcb2025e629ae7c239d7a053c3827c712da5189bdb89473433709"} Jan 29 13:33:25 crc kubenswrapper[4787]: I0129 13:33:25.397284 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" event={"ID":"762c1b23-a057-48d8-a61b-bef88d9f588a","Type":"ContainerDied","Data":"911f302a531e0b42d0219fe971ffe1f8cbe4748f8deb7d31f088a49ca0c6799c"} Jan 29 13:33:25 crc kubenswrapper[4787]: I0129 13:33:25.398176 4787 generic.go:334] "Generic (PLEG): container finished" podID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerID="911f302a531e0b42d0219fe971ffe1f8cbe4748f8deb7d31f088a49ca0c6799c" exitCode=0 Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.678954 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.843022 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-bundle\") pod \"762c1b23-a057-48d8-a61b-bef88d9f588a\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.843076 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-util\") pod \"762c1b23-a057-48d8-a61b-bef88d9f588a\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.843126 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96lzs\" (UniqueName: \"kubernetes.io/projected/762c1b23-a057-48d8-a61b-bef88d9f588a-kube-api-access-96lzs\") pod \"762c1b23-a057-48d8-a61b-bef88d9f588a\" (UID: \"762c1b23-a057-48d8-a61b-bef88d9f588a\") " Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.844153 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-bundle" (OuterVolumeSpecName: "bundle") pod "762c1b23-a057-48d8-a61b-bef88d9f588a" (UID: "762c1b23-a057-48d8-a61b-bef88d9f588a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.851381 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/762c1b23-a057-48d8-a61b-bef88d9f588a-kube-api-access-96lzs" (OuterVolumeSpecName: "kube-api-access-96lzs") pod "762c1b23-a057-48d8-a61b-bef88d9f588a" (UID: "762c1b23-a057-48d8-a61b-bef88d9f588a"). InnerVolumeSpecName "kube-api-access-96lzs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.858287 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-util" (OuterVolumeSpecName: "util") pod "762c1b23-a057-48d8-a61b-bef88d9f588a" (UID: "762c1b23-a057-48d8-a61b-bef88d9f588a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.945425 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96lzs\" (UniqueName: \"kubernetes.io/projected/762c1b23-a057-48d8-a61b-bef88d9f588a-kube-api-access-96lzs\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.945502 4787 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:26 crc kubenswrapper[4787]: I0129 13:33:26.945521 4787 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/762c1b23-a057-48d8-a61b-bef88d9f588a-util\") on node \"crc\" DevicePath \"\"" Jan 29 13:33:27 crc kubenswrapper[4787]: I0129 13:33:27.417131 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" event={"ID":"762c1b23-a057-48d8-a61b-bef88d9f588a","Type":"ContainerDied","Data":"c91b77328466da0f2b6c061362ad08568db8fe13c4f86f43e1466c8be83c5536"} Jan 29 13:33:27 crc kubenswrapper[4787]: I0129 13:33:27.417606 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c91b77328466da0f2b6c061362ad08568db8fe13c4f86f43e1466c8be83c5536" Jan 29 13:33:27 crc kubenswrapper[4787]: I0129 13:33:27.417234 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg" Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.699854 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"] Jan 29 13:33:33 crc kubenswrapper[4787]: E0129 13:33:33.700802 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerName="pull" Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.700817 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerName="pull" Jan 29 13:33:33 crc kubenswrapper[4787]: E0129 13:33:33.700838 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerName="util" Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.700844 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerName="util" Jan 29 13:33:33 crc kubenswrapper[4787]: E0129 13:33:33.700860 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerName="extract" Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.700867 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerName="extract" Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.700992 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="762c1b23-a057-48d8-a61b-bef88d9f588a" containerName="extract" Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.701453 4787 util.go:30] "No sandbox for pod can be found. 
Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.707220 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-pvjtk"
Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.742608 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"]
Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.859745 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfgj6\" (UniqueName: \"kubernetes.io/projected/a07f8190-e680-4b1b-b445-6a8a2f98c85c-kube-api-access-xfgj6\") pod \"openstack-operator-controller-init-757f46c65d-22scc\" (UID: \"a07f8190-e680-4b1b-b445-6a8a2f98c85c\") " pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"
Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.961589 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfgj6\" (UniqueName: \"kubernetes.io/projected/a07f8190-e680-4b1b-b445-6a8a2f98c85c-kube-api-access-xfgj6\") pod \"openstack-operator-controller-init-757f46c65d-22scc\" (UID: \"a07f8190-e680-4b1b-b445-6a8a2f98c85c\") " pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"
Jan 29 13:33:33 crc kubenswrapper[4787]: I0129 13:33:33.981722 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfgj6\" (UniqueName: \"kubernetes.io/projected/a07f8190-e680-4b1b-b445-6a8a2f98c85c-kube-api-access-xfgj6\") pod \"openstack-operator-controller-init-757f46c65d-22scc\" (UID: \"a07f8190-e680-4b1b-b445-6a8a2f98c85c\") " pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"
Jan 29 13:33:34 crc kubenswrapper[4787]: I0129 13:33:34.018261 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"
Jan 29 13:33:34 crc kubenswrapper[4787]: I0129 13:33:34.321298 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"]
Jan 29 13:33:34 crc kubenswrapper[4787]: I0129 13:33:34.475184 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc" event={"ID":"a07f8190-e680-4b1b-b445-6a8a2f98c85c","Type":"ContainerStarted","Data":"e3537a1539b702faa0c7587bcd248cdda400638af7691dcb4714f8ca6ff96735"}
Jan 29 13:33:39 crc kubenswrapper[4787]: I0129 13:33:39.528950 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc" event={"ID":"a07f8190-e680-4b1b-b445-6a8a2f98c85c","Type":"ContainerStarted","Data":"32f5611d314f3e642e2cb322e39810cc15a7216ff83b9300474203ec352d205c"}
Jan 29 13:33:39 crc kubenswrapper[4787]: I0129 13:33:39.530055 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"
Jan 29 13:33:39 crc kubenswrapper[4787]: I0129 13:33:39.572934 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc" podStartSLOduration=2.289301856 podStartE2EDuration="6.572897693s" podCreationTimestamp="2026-01-29 13:33:33 +0000 UTC" firstStartedPulling="2026-01-29 13:33:34.335447405 +0000 UTC m=+1053.096707681" lastFinishedPulling="2026-01-29 13:33:38.619043232 +0000 UTC m=+1057.380303518" observedRunningTime="2026-01-29 13:33:39.570688949 +0000 UTC m=+1058.331949255" watchObservedRunningTime="2026-01-29 13:33:39.572897693 +0000 UTC m=+1058.334158009"
Jan 29 13:33:44 crc kubenswrapper[4787]: I0129 13:33:44.862755 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-757f46c65d-22scc"
Jan 29 13:33:58 crc kubenswrapper[4787]: I0129 13:33:58.394748 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:33:58 crc kubenswrapper[4787]: I0129 13:33:58.395612 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.049298 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.053491 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.056670 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-sxhwb"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.065189 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.066342 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.068562 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-pzj9d"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.077551 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.089078 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.099166 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.100058 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.104736 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-p5tk4"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.113696 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.120669 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.126941 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.140319 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-vgm2k"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.203426 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.210953 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.212291 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.216112 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-8f9lt"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.244706 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9q54b\" (UniqueName: \"kubernetes.io/projected/fc5f8eee-854d-4c9f-9306-9c8976fdca42-kube-api-access-9q54b\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-mbqml\" (UID: \"fc5f8eee-854d-4c9f-9306-9c8976fdca42\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.245027 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtn2p\" (UniqueName: \"kubernetes.io/projected/4470a145-09b2-435b-ba61-8b96b442c503-kube-api-access-gtn2p\") pod \"glance-operator-controller-manager-8886f4c47-77c5v\" (UID: \"4470a145-09b2-435b-ba61-8b96b442c503\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.245145 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ctjv\" (UniqueName: \"kubernetes.io/projected/1c223b82-ca0b-4d31-b6ca-df34fd0684e4-kube-api-access-2ctjv\") pod \"designate-operator-controller-manager-6d9697b7f4-mgb4d\" (UID: \"1c223b82-ca0b-4d31-b6ca-df34fd0684e4\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.245237 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9wgg\" (UniqueName: \"kubernetes.io/projected/762f62bb-d090-474d-b9f1-36ec8943103f-kube-api-access-d9wgg\") pod \"cinder-operator-controller-manager-8d874c8fc-6gl4h\" (UID: \"762f62bb-d090-474d-b9f1-36ec8943103f\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.257665 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.258994 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.269486 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-pdf2m"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.272950 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.302633 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.317724 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.318840 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.323140 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.323775 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.336151 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-85bh7"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.336719 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.336804 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.336763 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-z9hgk"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.344086 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.349515 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.350594 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.350930 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9q54b\" (UniqueName: \"kubernetes.io/projected/fc5f8eee-854d-4c9f-9306-9c8976fdca42-kube-api-access-9q54b\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-mbqml\" (UID: \"fc5f8eee-854d-4c9f-9306-9c8976fdca42\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.350984 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtn2p\" (UniqueName: \"kubernetes.io/projected/4470a145-09b2-435b-ba61-8b96b442c503-kube-api-access-gtn2p\") pod \"glance-operator-controller-manager-8886f4c47-77c5v\" (UID: \"4470a145-09b2-435b-ba61-8b96b442c503\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.351012 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g29kt\" (UniqueName: \"kubernetes.io/projected/a05f08af-57ab-4b3b-b15a-05d66257ed6e-kube-api-access-g29kt\") pod \"heat-operator-controller-manager-69d6db494d-p7szc\" (UID: \"a05f08af-57ab-4b3b-b15a-05d66257ed6e\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.351043 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ctjv\" (UniqueName: \"kubernetes.io/projected/1c223b82-ca0b-4d31-b6ca-df34fd0684e4-kube-api-access-2ctjv\") pod \"designate-operator-controller-manager-6d9697b7f4-mgb4d\" (UID: \"1c223b82-ca0b-4d31-b6ca-df34fd0684e4\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.351070 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9wgg\" (UniqueName: \"kubernetes.io/projected/762f62bb-d090-474d-b9f1-36ec8943103f-kube-api-access-d9wgg\") pod \"cinder-operator-controller-manager-8d874c8fc-6gl4h\" (UID: \"762f62bb-d090-474d-b9f1-36ec8943103f\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.358006 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-7tj6j"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.358832 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.368544 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.369775 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.381996 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.382804 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.382906 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.385554 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.386907 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-5vzpn"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.398835 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-nvww6"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.406489 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ctjv\" (UniqueName: \"kubernetes.io/projected/1c223b82-ca0b-4d31-b6ca-df34fd0684e4-kube-api-access-2ctjv\") pod \"designate-operator-controller-manager-6d9697b7f4-mgb4d\" (UID: \"1c223b82-ca0b-4d31-b6ca-df34fd0684e4\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.408969 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.409936 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.411750 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9q54b\" (UniqueName: \"kubernetes.io/projected/fc5f8eee-854d-4c9f-9306-9c8976fdca42-kube-api-access-9q54b\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-mbqml\" (UID: \"fc5f8eee-854d-4c9f-9306-9c8976fdca42\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.413388 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtn2p\" (UniqueName: \"kubernetes.io/projected/4470a145-09b2-435b-ba61-8b96b442c503-kube-api-access-gtn2p\") pod \"glance-operator-controller-manager-8886f4c47-77c5v\" (UID: \"4470a145-09b2-435b-ba61-8b96b442c503\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.427927 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.428218 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9wgg\" (UniqueName: \"kubernetes.io/projected/762f62bb-d090-474d-b9f1-36ec8943103f-kube-api-access-d9wgg\") pod \"cinder-operator-controller-manager-8d874c8fc-6gl4h\" (UID: \"762f62bb-d090-474d-b9f1-36ec8943103f\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.429074 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.429197 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.441122 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.443025 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.447202 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-rftpb"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.447818 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-b4z9j"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.457022 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g29kt\" (UniqueName: \"kubernetes.io/projected/a05f08af-57ab-4b3b-b15a-05d66257ed6e-kube-api-access-g29kt\") pod \"heat-operator-controller-manager-69d6db494d-p7szc\" (UID: \"a05f08af-57ab-4b3b-b15a-05d66257ed6e\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.457083 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjbt9\" (UniqueName: \"kubernetes.io/projected/89290a20-6551-4b90-940a-d3ac4c676efc-kube-api-access-tjbt9\") pod \"ironic-operator-controller-manager-5f4b8bd54d-jsfxt\" (UID: \"89290a20-6551-4b90-940a-d3ac4c676efc\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.457121 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xk4d\" (UniqueName: \"kubernetes.io/projected/9f1a9e7c-3ca7-4fd3-ab71-838e77a80368-kube-api-access-2xk4d\") pod \"keystone-operator-controller-manager-84f48565d4-gjzmj\" (UID: \"9f1a9e7c-3ca7-4fd3-ab71-838e77a80368\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.457182 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzgns\" (UniqueName: \"kubernetes.io/projected/c1d33841-e546-4927-a97a-ee8e6eee6765-kube-api-access-tzgns\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.457208 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.457252 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj5xv\" (UniqueName: \"kubernetes.io/projected/95712c95-56cf-4b2a-9590-fd82b55811c9-kube-api-access-nj5xv\") pod \"horizon-operator-controller-manager-5fb775575f-h4m5x\" (UID: \"95712c95-56cf-4b2a-9590-fd82b55811c9\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.457290 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkf5p\" (UniqueName: \"kubernetes.io/projected/bb138494-6a07-478c-b107-c1fd788bf4d7-kube-api-access-zkf5p\") pod \"nova-operator-controller-manager-55bff696bd-pjlwc\" (UID: \"bb138494-6a07-478c-b107-c1fd788bf4d7\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.467640 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.468843 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.471816 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.483177 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-9qgmf"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.483743 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.513990 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.515140 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.525727 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.526811 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.531311 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g29kt\" (UniqueName: \"kubernetes.io/projected/a05f08af-57ab-4b3b-b15a-05d66257ed6e-kube-api-access-g29kt\") pod \"heat-operator-controller-manager-69d6db494d-p7szc\" (UID: \"a05f08af-57ab-4b3b-b15a-05d66257ed6e\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.531490 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-57qsx"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.533981 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.539153 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.542053 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-wrrsm"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.542326 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567481 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b4v8\" (UniqueName: \"kubernetes.io/projected/bccdaac3-abf0-40b7-8421-499efcb20f1e-kube-api-access-2b4v8\") pod \"mariadb-operator-controller-manager-67bf948998-fgs9r\" (UID: \"bccdaac3-abf0-40b7-8421-499efcb20f1e\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567558 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjbt9\" (UniqueName: \"kubernetes.io/projected/89290a20-6551-4b90-940a-d3ac4c676efc-kube-api-access-tjbt9\") pod \"ironic-operator-controller-manager-5f4b8bd54d-jsfxt\" (UID: \"89290a20-6551-4b90-940a-d3ac4c676efc\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567593 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xk4d\" (UniqueName: \"kubernetes.io/projected/9f1a9e7c-3ca7-4fd3-ab71-838e77a80368-kube-api-access-2xk4d\") pod \"keystone-operator-controller-manager-84f48565d4-gjzmj\" (UID: \"9f1a9e7c-3ca7-4fd3-ab71-838e77a80368\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567616 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpr7c\" (UniqueName: \"kubernetes.io/projected/fd03e55b-eda1-478b-b41c-c97113cd3045-kube-api-access-vpr7c\") pod \"manila-operator-controller-manager-7dd968899f-wfpg9\" (UID: \"fd03e55b-eda1-478b-b41c-c97113cd3045\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567639 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzgns\" (UniqueName: \"kubernetes.io/projected/c1d33841-e546-4927-a97a-ee8e6eee6765-kube-api-access-tzgns\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567661 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567688 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skj72\" (UniqueName: \"kubernetes.io/projected/4eab04f9-d92c-40f2-bc47-970ecd86b6e4-kube-api-access-skj72\") pod \"neutron-operator-controller-manager-585dbc889-dqzfb\" (UID: \"4eab04f9-d92c-40f2-bc47-970ecd86b6e4\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567710 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj5xv\" (UniqueName: \"kubernetes.io/projected/95712c95-56cf-4b2a-9590-fd82b55811c9-kube-api-access-nj5xv\") pod \"horizon-operator-controller-manager-5fb775575f-h4m5x\" (UID: \"95712c95-56cf-4b2a-9590-fd82b55811c9\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.567741 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkf5p\" (UniqueName: \"kubernetes.io/projected/bb138494-6a07-478c-b107-c1fd788bf4d7-kube-api-access-zkf5p\") pod \"nova-operator-controller-manager-55bff696bd-pjlwc\" (UID: \"bb138494-6a07-478c-b107-c1fd788bf4d7\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc"
Jan 29 13:34:08 crc kubenswrapper[4787]: E0129 13:34:08.569176 4787 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 29 13:34:08 crc kubenswrapper[4787]: E0129 13:34:08.569237 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert podName:c1d33841-e546-4927-a97a-ee8e6eee6765 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:09.069215239 +0000 UTC m=+1087.830475505 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert") pod "infra-operator-controller-manager-79955696d6-cktt7" (UID: "c1d33841-e546-4927-a97a-ee8e6eee6765") : secret "infra-operator-webhook-server-cert" not found
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.569565 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.570425 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.576709 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.598429 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62"]
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.599042 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.587185 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-vgpl5"
Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.600433 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.614496 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj5xv\" (UniqueName: \"kubernetes.io/projected/95712c95-56cf-4b2a-9590-fd82b55811c9-kube-api-access-nj5xv\") pod \"horizon-operator-controller-manager-5fb775575f-h4m5x\" (UID: \"95712c95-56cf-4b2a-9590-fd82b55811c9\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.617944 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xk4d\" (UniqueName: \"kubernetes.io/projected/9f1a9e7c-3ca7-4fd3-ab71-838e77a80368-kube-api-access-2xk4d\") pod \"keystone-operator-controller-manager-84f48565d4-gjzmj\" (UID: \"9f1a9e7c-3ca7-4fd3-ab71-838e77a80368\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.623186 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-kmtl9" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.623289 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-q684m" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.624254 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjbt9\" (UniqueName: \"kubernetes.io/projected/89290a20-6551-4b90-940a-d3ac4c676efc-kube-api-access-tjbt9\") pod \"ironic-operator-controller-manager-5f4b8bd54d-jsfxt\" (UID: \"89290a20-6551-4b90-940a-d3ac4c676efc\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.634243 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkf5p\" (UniqueName: \"kubernetes.io/projected/bb138494-6a07-478c-b107-c1fd788bf4d7-kube-api-access-zkf5p\") pod \"nova-operator-controller-manager-55bff696bd-pjlwc\" (UID: \"bb138494-6a07-478c-b107-c1fd788bf4d7\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.638876 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzgns\" (UniqueName: \"kubernetes.io/projected/c1d33841-e546-4927-a97a-ee8e6eee6765-kube-api-access-tzgns\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.717810 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719290 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r79v\" (UniqueName: \"kubernetes.io/projected/50979af0-52a2-45bd-b6af-22e22daeacee-kube-api-access-8r79v\") pod \"placement-operator-controller-manager-5b964cf4cd-jn5rl\" (UID: \"50979af0-52a2-45bd-b6af-22e22daeacee\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719344 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b4v8\" (UniqueName: \"kubernetes.io/projected/bccdaac3-abf0-40b7-8421-499efcb20f1e-kube-api-access-2b4v8\") pod \"mariadb-operator-controller-manager-67bf948998-fgs9r\" (UID: \"bccdaac3-abf0-40b7-8421-499efcb20f1e\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719370 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719428 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh2hs\" (UniqueName: \"kubernetes.io/projected/1aac5893-712c-4305-be5c-058309de4369-kube-api-access-sh2hs\") pod \"ovn-operator-controller-manager-788c46999f-dlk2z\" (UID: \"1aac5893-712c-4305-be5c-058309de4369\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719482 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2jlk\" (UniqueName: \"kubernetes.io/projected/6327fc99-6096-4780-8d8d-11d454f09e83-kube-api-access-w2jlk\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719519 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpr7c\" (UniqueName: \"kubernetes.io/projected/fd03e55b-eda1-478b-b41c-c97113cd3045-kube-api-access-vpr7c\") pod \"manila-operator-controller-manager-7dd968899f-wfpg9\" (UID: \"fd03e55b-eda1-478b-b41c-c97113cd3045\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719546 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f95ms\" (UniqueName: \"kubernetes.io/projected/79858873-af90-4279-82dd-ff3a996bcb30-kube-api-access-f95ms\") pod \"telemetry-operator-controller-manager-64b5b76f97-dsk62\" (UID: \"79858873-af90-4279-82dd-ff3a996bcb30\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719669 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-skj72\" (UniqueName: \"kubernetes.io/projected/4eab04f9-d92c-40f2-bc47-970ecd86b6e4-kube-api-access-skj72\") pod \"neutron-operator-controller-manager-585dbc889-dqzfb\" (UID: \"4eab04f9-d92c-40f2-bc47-970ecd86b6e4\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.719727 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2bf9\" (UniqueName: \"kubernetes.io/projected/a5c3aabb-1f99-416b-9765-28cb31fc1b39-kube-api-access-d2bf9\") pod \"octavia-operator-controller-manager-6687f8d877-tdkqd\" (UID: \"a5c3aabb-1f99-416b-9765-28cb31fc1b39\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.720632 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.722231 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.740744 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl"] Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.758108 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skj72\" (UniqueName: \"kubernetes.io/projected/4eab04f9-d92c-40f2-bc47-970ecd86b6e4-kube-api-access-skj72\") pod \"neutron-operator-controller-manager-585dbc889-dqzfb\" (UID: \"4eab04f9-d92c-40f2-bc47-970ecd86b6e4\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.764380 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b4v8\" (UniqueName: \"kubernetes.io/projected/bccdaac3-abf0-40b7-8421-499efcb20f1e-kube-api-access-2b4v8\") pod \"mariadb-operator-controller-manager-67bf948998-fgs9r\" (UID: \"bccdaac3-abf0-40b7-8421-499efcb20f1e\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.764480 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2"] Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.785137 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.785718 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"] Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.786232 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpr7c\" (UniqueName: \"kubernetes.io/projected/fd03e55b-eda1-478b-b41c-c97113cd3045-kube-api-access-vpr7c\") pod \"manila-operator-controller-manager-7dd968899f-wfpg9\" (UID: \"fd03e55b-eda1-478b-b41c-c97113cd3045\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.786362 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.803103 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62"] Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.822006 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2bf9\" (UniqueName: \"kubernetes.io/projected/a5c3aabb-1f99-416b-9765-28cb31fc1b39-kube-api-access-d2bf9\") pod \"octavia-operator-controller-manager-6687f8d877-tdkqd\" (UID: \"a5c3aabb-1f99-416b-9765-28cb31fc1b39\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.822076 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r79v\" (UniqueName: \"kubernetes.io/projected/50979af0-52a2-45bd-b6af-22e22daeacee-kube-api-access-8r79v\") pod \"placement-operator-controller-manager-5b964cf4cd-jn5rl\" (UID: \"50979af0-52a2-45bd-b6af-22e22daeacee\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.822112 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxw6p\" (UniqueName: \"kubernetes.io/projected/13817508-b3e5-4f29-94d9-84fc3192d6e6-kube-api-access-hxw6p\") pod \"swift-operator-controller-manager-68fc8c869-7p9r2\" (UID: \"13817508-b3e5-4f29-94d9-84fc3192d6e6\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.822144 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.822191 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh2hs\" (UniqueName: \"kubernetes.io/projected/1aac5893-712c-4305-be5c-058309de4369-kube-api-access-sh2hs\") pod \"ovn-operator-controller-manager-788c46999f-dlk2z\" (UID: \"1aac5893-712c-4305-be5c-058309de4369\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.822222 4787 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2jlk\" (UniqueName: \"kubernetes.io/projected/6327fc99-6096-4780-8d8d-11d454f09e83-kube-api-access-w2jlk\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.822251 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f95ms\" (UniqueName: \"kubernetes.io/projected/79858873-af90-4279-82dd-ff3a996bcb30-kube-api-access-f95ms\") pod \"telemetry-operator-controller-manager-64b5b76f97-dsk62\" (UID: \"79858873-af90-4279-82dd-ff3a996bcb30\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" Jan 29 13:34:08 crc kubenswrapper[4787]: E0129 13:34:08.823269 4787 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:08 crc kubenswrapper[4787]: E0129 13:34:08.823320 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert podName:6327fc99-6096-4780-8d8d-11d454f09e83 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:09.323302373 +0000 UTC m=+1088.084562649 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert") pod "openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" (UID: "6327fc99-6096-4780-8d8d-11d454f09e83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.886593 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.891084 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.896872 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh2hs\" (UniqueName: \"kubernetes.io/projected/1aac5893-712c-4305-be5c-058309de4369-kube-api-access-sh2hs\") pod \"ovn-operator-controller-manager-788c46999f-dlk2z\" (UID: \"1aac5893-712c-4305-be5c-058309de4369\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.916959 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r79v\" (UniqueName: \"kubernetes.io/projected/50979af0-52a2-45bd-b6af-22e22daeacee-kube-api-access-8r79v\") pod \"placement-operator-controller-manager-5b964cf4cd-jn5rl\" (UID: \"50979af0-52a2-45bd-b6af-22e22daeacee\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.922393 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f95ms\" (UniqueName: \"kubernetes.io/projected/79858873-af90-4279-82dd-ff3a996bcb30-kube-api-access-f95ms\") pod \"telemetry-operator-controller-manager-64b5b76f97-dsk62\" (UID: \"79858873-af90-4279-82dd-ff3a996bcb30\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.924892 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxw6p\" (UniqueName: \"kubernetes.io/projected/13817508-b3e5-4f29-94d9-84fc3192d6e6-kube-api-access-hxw6p\") pod \"swift-operator-controller-manager-68fc8c869-7p9r2\" (UID: \"13817508-b3e5-4f29-94d9-84fc3192d6e6\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.930116 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2bf9\" (UniqueName: \"kubernetes.io/projected/a5c3aabb-1f99-416b-9765-28cb31fc1b39-kube-api-access-d2bf9\") pod \"octavia-operator-controller-manager-6687f8d877-tdkqd\" (UID: \"a5c3aabb-1f99-416b-9765-28cb31fc1b39\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.936067 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m"] Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.943973 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.953030 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-pvs4d" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.959230 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2jlk\" (UniqueName: \"kubernetes.io/projected/6327fc99-6096-4780-8d8d-11d454f09e83-kube-api-access-w2jlk\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.965241 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxw6p\" (UniqueName: \"kubernetes.io/projected/13817508-b3e5-4f29-94d9-84fc3192d6e6-kube-api-access-hxw6p\") pod \"swift-operator-controller-manager-68fc8c869-7p9r2\" (UID: \"13817508-b3e5-4f29-94d9-84fc3192d6e6\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.978664 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m"] Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.981057 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r" Jan 29 13:34:08 crc kubenswrapper[4787]: I0129 13:34:08.993831 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.008389 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-57x96"] Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.010811 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.014802 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-tpntx" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.023547 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-57x96"] Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.026942 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smtkx\" (UniqueName: \"kubernetes.io/projected/57cbf5c5-c741-4f43-881e-bf2dbecace54-kube-api-access-smtkx\") pod \"test-operator-controller-manager-56f8bfcd9f-glb7m\" (UID: \"57cbf5c5-c741-4f43-881e-bf2dbecace54\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.032861 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.055205 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"] Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.058564 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.061404 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.061569 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.066089 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"] Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.068967 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-62vlk" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.074006 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.088512 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx"] Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.089908 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.094591 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-txkzf" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.100226 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx"] Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.125011 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.126034 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.127791 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bktj\" (UniqueName: \"kubernetes.io/projected/3203adf3-229d-4717-95c2-d0dd83d6909b-kube-api-access-7bktj\") pod \"watcher-operator-controller-manager-564965969-57x96\" (UID: \"3203adf3-229d-4717-95c2-d0dd83d6909b\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.127896 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.127959 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smtkx\" (UniqueName: \"kubernetes.io/projected/57cbf5c5-c741-4f43-881e-bf2dbecace54-kube-api-access-smtkx\") pod \"test-operator-controller-manager-56f8bfcd9f-glb7m\" (UID: \"57cbf5c5-c741-4f43-881e-bf2dbecace54\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.128009 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.128340 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.128387 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24zdp\" (UniqueName: \"kubernetes.io/projected/9046af2e-08a5-402f-97ee-5946a966b8f7-kube-api-access-24zdp\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wk5tx\" (UID: \"9046af2e-08a5-402f-97ee-5946a966b8f7\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.128428 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfhv7\" (UniqueName: \"kubernetes.io/projected/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-kube-api-access-zfhv7\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.128899 4787 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" 
not found Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.129116 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert podName:c1d33841-e546-4927-a97a-ee8e6eee6765 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:10.129095783 +0000 UTC m=+1088.890356059 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert") pod "infra-operator-controller-manager-79955696d6-cktt7" (UID: "c1d33841-e546-4927-a97a-ee8e6eee6765") : secret "infra-operator-webhook-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.142794 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.155342 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smtkx\" (UniqueName: \"kubernetes.io/projected/57cbf5c5-c741-4f43-881e-bf2dbecace54-kube-api-access-smtkx\") pod \"test-operator-controller-manager-56f8bfcd9f-glb7m\" (UID: \"57cbf5c5-c741-4f43-881e-bf2dbecace54\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.230374 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfhv7\" (UniqueName: \"kubernetes.io/projected/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-kube-api-access-zfhv7\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.230434 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bktj\" (UniqueName: \"kubernetes.io/projected/3203adf3-229d-4717-95c2-d0dd83d6909b-kube-api-access-7bktj\") pod \"watcher-operator-controller-manager-564965969-57x96\" (UID: \"3203adf3-229d-4717-95c2-d0dd83d6909b\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.230600 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.230629 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.230650 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24zdp\" (UniqueName: \"kubernetes.io/projected/9046af2e-08a5-402f-97ee-5946a966b8f7-kube-api-access-24zdp\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wk5tx\" (UID: \"9046af2e-08a5-402f-97ee-5946a966b8f7\") " 
pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.231414 4787 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.231771 4787 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.231891 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:09.731867597 +0000 UTC m=+1088.493127873 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "webhook-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.240248 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:09.740213037 +0000 UTC m=+1088.501473313 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "metrics-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.254705 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24zdp\" (UniqueName: \"kubernetes.io/projected/9046af2e-08a5-402f-97ee-5946a966b8f7-kube-api-access-24zdp\") pod \"rabbitmq-cluster-operator-manager-668c99d594-wk5tx\" (UID: \"9046af2e-08a5-402f-97ee-5946a966b8f7\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.256605 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bktj\" (UniqueName: \"kubernetes.io/projected/3203adf3-229d-4717-95c2-d0dd83d6909b-kube-api-access-7bktj\") pod \"watcher-operator-controller-manager-564965969-57x96\" (UID: \"3203adf3-229d-4717-95c2-d0dd83d6909b\") " pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.256739 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfhv7\" (UniqueName: \"kubernetes.io/projected/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-kube-api-access-zfhv7\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.333330 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.334506 4787 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.334565 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert podName:6327fc99-6096-4780-8d8d-11d454f09e83 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:10.334543078 +0000 UTC m=+1089.095803344 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert") pod "openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" (UID: "6327fc99-6096-4780-8d8d-11d454f09e83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.456203 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.487993 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.511066 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.562298 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"] Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.749046 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: I0129 13:34:09.749097 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.749323 4787 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.749394 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:10.749373813 +0000 UTC m=+1089.510634089 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "webhook-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.749674 4787 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 13:34:09 crc kubenswrapper[4787]: E0129 13:34:09.749789 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:10.749761604 +0000 UTC m=+1089.511021880 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "metrics-server-cert" not found Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.000481 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.007994 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc"] Jan 29 13:34:10 crc kubenswrapper[4787]: W0129 13:34:10.012885 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb138494_6a07_478c_b107_c1fd788bf4d7.slice/crio-080b63cc7683948ecc2de9bd906ca27bed0deba75f29506ef1a857630316340b WatchSource:0}: Error finding container 080b63cc7683948ecc2de9bd906ca27bed0deba75f29506ef1a857630316340b: Status 404 returned error can't find the container with id 080b63cc7683948ecc2de9bd906ca27bed0deba75f29506ef1a857630316340b Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.101638 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml" event={"ID":"fc5f8eee-854d-4c9f-9306-9c8976fdca42","Type":"ContainerStarted","Data":"fc1a2cb5da5571eab4bb4b499a720815ab3f2a1c2fadec96546ade40935897f8"} Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.104015 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v" event={"ID":"4470a145-09b2-435b-ba61-8b96b442c503","Type":"ContainerStarted","Data":"2db923d98851f6bb98c8c580bf3d7281db0fd7bd13af750c7c30e82ef9478e10"} Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.106523 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc" event={"ID":"bb138494-6a07-478c-b107-c1fd788bf4d7","Type":"ContainerStarted","Data":"080b63cc7683948ecc2de9bd906ca27bed0deba75f29506ef1a857630316340b"} Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.155535 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7" Jan 29 
13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.155726 4787 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.155807 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert podName:c1d33841-e546-4927-a97a-ee8e6eee6765 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:12.155772596 +0000 UTC m=+1090.917032862 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert") pod "infra-operator-controller-manager-79955696d6-cktt7" (UID: "c1d33841-e546-4927-a97a-ee8e6eee6765") : secret "infra-operator-webhook-server-cert" not found Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.174227 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r"] Jan 29 13:34:10 crc kubenswrapper[4787]: W0129 13:34:10.193428 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd03e55b_eda1_478b_b41c_c97113cd3045.slice/crio-79b9de12cff795a4b9b8da9d21e56782cbf0d9927546aaaa2343a070df088683 WatchSource:0}: Error finding container 79b9de12cff795a4b9b8da9d21e56782cbf0d9927546aaaa2343a070df088683: Status 404 returned error can't find the container with id 79b9de12cff795a4b9b8da9d21e56782cbf0d9927546aaaa2343a070df088683 Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.196568 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9"] Jan 29 13:34:10 crc kubenswrapper[4787]: W0129 13:34:10.208306 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda05f08af_57ab_4b3b_b15a_05d66257ed6e.slice/crio-072bcff62d3a5cd51f8cca20d08a2917a812e42761d75bc27f8cb4d74ec2af72 WatchSource:0}: Error finding container 072bcff62d3a5cd51f8cca20d08a2917a812e42761d75bc27f8cb4d74ec2af72: Status 404 returned error can't find the container with id 072bcff62d3a5cd51f8cca20d08a2917a812e42761d75bc27f8cb4d74ec2af72 Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.213420 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h"] Jan 29 13:34:10 crc kubenswrapper[4787]: W0129 13:34:10.214838 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89290a20_6551_4b90_940a_d3ac4c676efc.slice/crio-3f24c4efe14ec96a536bc5f0500fab20450cd7f5c20022a2a1bfccc106e1b9f4 WatchSource:0}: Error finding container 3f24c4efe14ec96a536bc5f0500fab20450cd7f5c20022a2a1bfccc106e1b9f4: Status 404 returned error can't find the container with id 3f24c4efe14ec96a536bc5f0500fab20450cd7f5c20022a2a1bfccc106e1b9f4 Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.228667 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.234948 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.242722 4787 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.251829 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.263218 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.269011 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.277687 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2"] Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.282885 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hxw6p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-68fc8c869-7p9r2_openstack-operators(13817508-b3e5-4f29-94d9-84fc3192d6e6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.284965 4787 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" podUID="13817508-b3e5-4f29-94d9-84fc3192d6e6" Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.358180 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.358347 4787 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.358420 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert podName:6327fc99-6096-4780-8d8d-11d454f09e83 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:12.358401101 +0000 UTC m=+1091.119661367 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert") pod "openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" (UID: "6327fc99-6096-4780-8d8d-11d454f09e83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.488495 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx"] Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.501121 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-24zdp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-wk5tx_openstack-operators(9046af2e-08a5-402f-97ee-5946a966b8f7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.502383 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" podUID="9046af2e-08a5-402f-97ee-5946a966b8f7" Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.505878 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.510799 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.516883 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-564965969-57x96"] Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.521671 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62"] Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.529312 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8r79v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5b964cf4cd-jn5rl_openstack-operators(50979af0-52a2-45bd-b6af-22e22daeacee): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.530598 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" podUID="50979af0-52a2-45bd-b6af-22e22daeacee" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.536251 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f95ms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-64b5b76f97-dsk62_openstack-operators(79858873-af90-4279-82dd-ff3a996bcb30): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.538722 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" podUID="79858873-af90-4279-82dd-ff3a996bcb30" Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.538776 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z"] Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.538873 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nj5xv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-5fb775575f-h4m5x_openstack-operators(95712c95-56cf-4b2a-9590-fd82b55811c9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.540251 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x" podUID="95712c95-56cf-4b2a-9590-fd82b55811c9" Jan 29 13:34:10 crc kubenswrapper[4787]: W0129 13:34:10.545252 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1aac5893_712c_4305_be5c_058309de4369.slice/crio-921c4cc788afe3996ee64c04b3c082cd375865c851b67fb4df9ba3e62c9cc092 WatchSource:0}: Error finding container 921c4cc788afe3996ee64c04b3c082cd375865c851b67fb4df9ba3e62c9cc092: Status 404 returned error can't find the container with id 921c4cc788afe3996ee64c04b3c082cd375865c851b67fb4df9ba3e62c9cc092 Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.545887 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x"] Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.545986 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7bktj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-564965969-57x96_openstack-operators(3203adf3-229d-4717-95c2-d0dd83d6909b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.547077 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" podUID="3203adf3-229d-4717-95c2-d0dd83d6909b" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.547503 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sh2hs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-788c46999f-dlk2z_openstack-operators(1aac5893-712c-4305-be5c-058309de4369): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.549479 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" podUID="1aac5893-712c-4305-be5c-058309de4369" Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.765395 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:10 crc kubenswrapper[4787]: I0129 13:34:10.765916 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.765558 4787 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.766059 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:12.766038699 +0000 UTC m=+1091.527298975 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "webhook-server-cert" not found Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.765994 4787 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 13:34:10 crc kubenswrapper[4787]: E0129 13:34:10.766432 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. 
No retries permitted until 2026-01-29 13:34:12.7664236 +0000 UTC m=+1091.527683876 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "metrics-server-cert" not found Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.121125 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" event={"ID":"1aac5893-712c-4305-be5c-058309de4369","Type":"ContainerStarted","Data":"921c4cc788afe3996ee64c04b3c082cd375865c851b67fb4df9ba3e62c9cc092"} Jan 29 13:34:11 crc kubenswrapper[4787]: E0129 13:34:11.124516 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" podUID="1aac5893-712c-4305-be5c-058309de4369" Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.124512 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" event={"ID":"3203adf3-229d-4717-95c2-d0dd83d6909b","Type":"ContainerStarted","Data":"582e2d84a40bcd387044033ec9b9e16be2a10351361a56ed69ba31f5494965bf"} Jan 29 13:34:11 crc kubenswrapper[4787]: E0129 13:34:11.128626 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" podUID="3203adf3-229d-4717-95c2-d0dd83d6909b" Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.129411 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x" event={"ID":"95712c95-56cf-4b2a-9590-fd82b55811c9","Type":"ContainerStarted","Data":"5932d45113ca5e87513b71abe899ad55b1e1b42e6a006bc5fae8989a98e59c6f"} Jan 29 13:34:11 crc kubenswrapper[4787]: E0129 13:34:11.130502 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x" podUID="95712c95-56cf-4b2a-9590-fd82b55811c9" Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.132165 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d" event={"ID":"1c223b82-ca0b-4d31-b6ca-df34fd0684e4","Type":"ContainerStarted","Data":"912d05dd36f169767cfa2f9e99d11293df38e3eac07ed4e9a8dbc5bc9fc82e4a"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.134708 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" 
event={"ID":"13817508-b3e5-4f29-94d9-84fc3192d6e6","Type":"ContainerStarted","Data":"0e8997a7c50d944bbdf5bf638ba244415f9a5ae70af6d7fbdbd62119f46b3cdd"} Jan 29 13:34:11 crc kubenswrapper[4787]: E0129 13:34:11.139357 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382\\\"\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" podUID="13817508-b3e5-4f29-94d9-84fc3192d6e6" Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.149013 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd" event={"ID":"a5c3aabb-1f99-416b-9765-28cb31fc1b39","Type":"ContainerStarted","Data":"0a425b1dc7936df9ee920dcd0d1935ca38d75f64e27c932c330df6112b90ac2b"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.161795 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt" event={"ID":"89290a20-6551-4b90-940a-d3ac4c676efc","Type":"ContainerStarted","Data":"3f24c4efe14ec96a536bc5f0500fab20450cd7f5c20022a2a1bfccc106e1b9f4"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.168678 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h" event={"ID":"762f62bb-d090-474d-b9f1-36ec8943103f","Type":"ContainerStarted","Data":"36ec1cd06cc381d2c81a485b38d1d2de3b84c217f504120ed7dfba7ded26d6fa"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.182815 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" event={"ID":"9046af2e-08a5-402f-97ee-5946a966b8f7","Type":"ContainerStarted","Data":"a585f5ec61f3bbb6c3497e8e38c9af81f9e9a9b58a4b7f12da042ab7e89a9e1a"} Jan 29 13:34:11 crc kubenswrapper[4787]: E0129 13:34:11.191464 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" podUID="9046af2e-08a5-402f-97ee-5946a966b8f7" Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.192730 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" event={"ID":"79858873-af90-4279-82dd-ff3a996bcb30","Type":"ContainerStarted","Data":"88dddf24dcfa3f1a20605303429f7a5204119f1ab2904f2e00a43706fbb96a09"} Jan 29 13:34:11 crc kubenswrapper[4787]: E0129 13:34:11.194443 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" podUID="79858873-af90-4279-82dd-ff3a996bcb30" Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.196085 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9" 
event={"ID":"fd03e55b-eda1-478b-b41c-c97113cd3045","Type":"ContainerStarted","Data":"79b9de12cff795a4b9b8da9d21e56782cbf0d9927546aaaa2343a070df088683"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.199446 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj" event={"ID":"9f1a9e7c-3ca7-4fd3-ab71-838e77a80368","Type":"ContainerStarted","Data":"875cb613fb55be9889148fb7276fd47da182c339e7ddc90e9446d3f5070725fb"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.201687 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r" event={"ID":"bccdaac3-abf0-40b7-8421-499efcb20f1e","Type":"ContainerStarted","Data":"6beef30b41ce956da010517f608b67ec39f6f335ecfd834ae4ee3a5bc982696e"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.211438 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" event={"ID":"57cbf5c5-c741-4f43-881e-bf2dbecace54","Type":"ContainerStarted","Data":"a3c095bf5239aa4252880d11e1c1ee66d45ea3cbdd455e99a977d4ac387676e1"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.223921 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb" event={"ID":"4eab04f9-d92c-40f2-bc47-970ecd86b6e4","Type":"ContainerStarted","Data":"3661946d2fcbf02a1e146b2e6552dc60a749ea5a284b39da115a396a14497e5e"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.227522 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc" event={"ID":"a05f08af-57ab-4b3b-b15a-05d66257ed6e","Type":"ContainerStarted","Data":"072bcff62d3a5cd51f8cca20d08a2917a812e42761d75bc27f8cb4d74ec2af72"} Jan 29 13:34:11 crc kubenswrapper[4787]: I0129 13:34:11.245762 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" event={"ID":"50979af0-52a2-45bd-b6af-22e22daeacee","Type":"ContainerStarted","Data":"76b6364690653b4e05c557c897b7a3b9babe8b7f10a404337c89e6d3de508d75"} Jan 29 13:34:11 crc kubenswrapper[4787]: E0129 13:34:11.249823 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" podUID="50979af0-52a2-45bd-b6af-22e22daeacee" Jan 29 13:34:12 crc kubenswrapper[4787]: I0129 13:34:12.215242 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.215423 4787 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.215515 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert 
podName:c1d33841-e546-4927-a97a-ee8e6eee6765 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:16.215492136 +0000 UTC m=+1094.976752412 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert") pod "infra-operator-controller-manager-79955696d6-cktt7" (UID: "c1d33841-e546-4927-a97a-ee8e6eee6765") : secret "infra-operator-webhook-server-cert" not found Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.259638 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:7869203f6f97de780368d507636031090fed3b658d2f7771acbd4481bdfc870b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" podUID="3203adf3-229d-4717-95c2-d0dd83d6909b" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.259665 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" podUID="50979af0-52a2-45bd-b6af-22e22daeacee" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.259934 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x" podUID="95712c95-56cf-4b2a-9590-fd82b55811c9" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.259987 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:ea7b72b648a5bde2eebd804c2a5c1608d448a4892176c1b8d000c1eef4bb92b4\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" podUID="1aac5893-712c-4305-be5c-058309de4369" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.260135 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" podUID="9046af2e-08a5-402f-97ee-5946a966b8f7" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.260402 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382\\\"\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" podUID="13817508-b3e5-4f29-94d9-84fc3192d6e6" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.260531 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:f9bf288cd0c13912404027a58ea3b90d4092b641e8265adc5c88644ea7fe901a\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" podUID="79858873-af90-4279-82dd-ff3a996bcb30" Jan 29 13:34:12 crc kubenswrapper[4787]: I0129 13:34:12.418853 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.419058 4787 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.419130 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert podName:6327fc99-6096-4780-8d8d-11d454f09e83 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:16.419109479 +0000 UTC m=+1095.180369755 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert") pod "openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" (UID: "6327fc99-6096-4780-8d8d-11d454f09e83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.835227 4787 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.835324 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:16.835302752 +0000 UTC m=+1095.596563028 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "metrics-server-cert" not found Jan 29 13:34:12 crc kubenswrapper[4787]: I0129 13:34:12.835069 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:12 crc kubenswrapper[4787]: I0129 13:34:12.836337 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.836446 4787 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 13:34:12 crc kubenswrapper[4787]: E0129 13:34:12.836502 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:16.836493096 +0000 UTC m=+1095.597753362 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "webhook-server-cert" not found Jan 29 13:34:16 crc kubenswrapper[4787]: I0129 13:34:16.289946 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7" Jan 29 13:34:16 crc kubenswrapper[4787]: E0129 13:34:16.290161 4787 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 29 13:34:16 crc kubenswrapper[4787]: E0129 13:34:16.290773 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert podName:c1d33841-e546-4927-a97a-ee8e6eee6765 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:24.290743963 +0000 UTC m=+1103.052004249 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert") pod "infra-operator-controller-manager-79955696d6-cktt7" (UID: "c1d33841-e546-4927-a97a-ee8e6eee6765") : secret "infra-operator-webhook-server-cert" not found Jan 29 13:34:16 crc kubenswrapper[4787]: I0129 13:34:16.493126 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" Jan 29 13:34:16 crc kubenswrapper[4787]: E0129 13:34:16.493358 4787 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:16 crc kubenswrapper[4787]: E0129 13:34:16.493499 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert podName:6327fc99-6096-4780-8d8d-11d454f09e83 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:24.4934495 +0000 UTC m=+1103.254709766 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert") pod "openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" (UID: "6327fc99-6096-4780-8d8d-11d454f09e83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 29 13:34:16 crc kubenswrapper[4787]: I0129 13:34:16.899354 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:16 crc kubenswrapper[4787]: I0129 13:34:16.899416 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" Jan 29 13:34:16 crc kubenswrapper[4787]: E0129 13:34:16.899682 4787 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 29 13:34:16 crc kubenswrapper[4787]: E0129 13:34:16.899699 4787 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 29 13:34:16 crc kubenswrapper[4787]: E0129 13:34:16.899758 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:24.899735899 +0000 UTC m=+1103.660996165 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "webhook-server-cert" not found Jan 29 13:34:16 crc kubenswrapper[4787]: E0129 13:34:16.899826 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:24.899794451 +0000 UTC m=+1103.661054837 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "metrics-server-cert" not found Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.362294 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r" event={"ID":"bccdaac3-abf0-40b7-8421-499efcb20f1e","Type":"ContainerStarted","Data":"03851ce808e5279d35191458b706243db407d58774dcd66539a472b25de3f632"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.363125 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.363404 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc" event={"ID":"bb138494-6a07-478c-b107-c1fd788bf4d7","Type":"ContainerStarted","Data":"2450ad86883d7886758ba35a5a7859837d4a7c38168e05259b526c0b63583e13"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.364642 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" event={"ID":"57cbf5c5-c741-4f43-881e-bf2dbecace54","Type":"ContainerStarted","Data":"bca38d666aabe1c2f1bc9210b36a8d390d1fd4ec2fee1a6196e1441d18212cf7"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.364773 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.365747 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb" event={"ID":"4eab04f9-d92c-40f2-bc47-970ecd86b6e4","Type":"ContainerStarted","Data":"c89d23357299dae2c815a8b46651caf67a2689e681d5ac48cf64007d34720f1c"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.365879 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.366799 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d" event={"ID":"1c223b82-ca0b-4d31-b6ca-df34fd0684e4","Type":"ContainerStarted","Data":"7d08260c56949b366f97637a950052eb45da0b782c5880696c2418cffddb6792"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.366878 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d" Jan 29 
13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.368102 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt" event={"ID":"89290a20-6551-4b90-940a-d3ac4c676efc","Type":"ContainerStarted","Data":"941fc71a4dc6780f3225e425800ae2775d33c2f9933194427e6f617887909979"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.368180 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.369175 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h" event={"ID":"762f62bb-d090-474d-b9f1-36ec8943103f","Type":"ContainerStarted","Data":"9e24ed8bfd1dbae1f348d65b6075fe1601ca762cecb36ac09446016dfa7640bc"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.369272 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.370303 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml" event={"ID":"fc5f8eee-854d-4c9f-9306-9c8976fdca42","Type":"ContainerStarted","Data":"ca3768279b3c8b4c9060336af0cf672be0886b38981baac7947e2f874ff105cf"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.370662 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.371561 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9" event={"ID":"fd03e55b-eda1-478b-b41c-c97113cd3045","Type":"ContainerStarted","Data":"c7d5c4315b1d3fa8138e9edd8bda91be45571ed534a7fa0b14125f43bfa3eec8"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.371886 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.372756 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc" event={"ID":"a05f08af-57ab-4b3b-b15a-05d66257ed6e","Type":"ContainerStarted","Data":"10afc1e3745dbe3fb07f1a31ecd25db72ac7fb44b5c9e60cb6ec352bc7961371"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.373107 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.374477 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v" event={"ID":"4470a145-09b2-435b-ba61-8b96b442c503","Type":"ContainerStarted","Data":"63b424f3dd2a2a7af49d98aa6ba78b6f7ea0b3e8aa4b833a1719784ffa1d4409"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.374630 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.376340 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd" 
event={"ID":"a5c3aabb-1f99-416b-9765-28cb31fc1b39","Type":"ContainerStarted","Data":"0fea3553a4631a37f9aa91301853319e8b0852f9efeff0a4a0f509ab072e6d68"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.376437 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.377669 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj" event={"ID":"9f1a9e7c-3ca7-4fd3-ab71-838e77a80368","Type":"ContainerStarted","Data":"8cda5e92672fa1d81092b39ccaee93c56b6c6d8a23e7b750c4fd8a5e247416bf"} Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.377835 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.466117 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r" podStartSLOduration=3.2009239 podStartE2EDuration="14.466085601s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.183349119 +0000 UTC m=+1088.944609395" lastFinishedPulling="2026-01-29 13:34:21.44851082 +0000 UTC m=+1100.209771096" observedRunningTime="2026-01-29 13:34:22.402618147 +0000 UTC m=+1101.163878423" watchObservedRunningTime="2026-01-29 13:34:22.466085601 +0000 UTC m=+1101.227345877" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.469691 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc" podStartSLOduration=3.043738912 podStartE2EDuration="14.469681925s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.017713297 +0000 UTC m=+1088.778973573" lastFinishedPulling="2026-01-29 13:34:21.44365631 +0000 UTC m=+1100.204916586" observedRunningTime="2026-01-29 13:34:22.461337585 +0000 UTC m=+1101.222597861" watchObservedRunningTime="2026-01-29 13:34:22.469681925 +0000 UTC m=+1101.230942201" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.517969 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd" podStartSLOduration=3.237034719 podStartE2EDuration="14.517940582s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.24878344 +0000 UTC m=+1089.010043716" lastFinishedPulling="2026-01-29 13:34:21.529689303 +0000 UTC m=+1100.290949579" observedRunningTime="2026-01-29 13:34:22.509965703 +0000 UTC m=+1101.271225999" watchObservedRunningTime="2026-01-29 13:34:22.517940582 +0000 UTC m=+1101.279200858" Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.566020 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml" podStartSLOduration=3.076513835 podStartE2EDuration="14.565994884s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:09.998946888 +0000 UTC m=+1088.760207164" lastFinishedPulling="2026-01-29 13:34:21.488427937 +0000 UTC m=+1100.249688213" observedRunningTime="2026-01-29 13:34:22.56378848 +0000 UTC m=+1101.325048756" watchObservedRunningTime="2026-01-29 13:34:22.565994884 +0000 UTC 
m=+1101.327255160"
Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.619671 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9" podStartSLOduration=3.33413337 podStartE2EDuration="14.619645836s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.205104114 +0000 UTC m=+1088.966364390" lastFinishedPulling="2026-01-29 13:34:21.49061658 +0000 UTC m=+1100.251876856" observedRunningTime="2026-01-29 13:34:22.618901294 +0000 UTC m=+1101.380161580" watchObservedRunningTime="2026-01-29 13:34:22.619645836 +0000 UTC m=+1101.380906112"
Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.663019 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v" podStartSLOduration=2.782439541 podStartE2EDuration="14.662993562s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:09.614695882 +0000 UTC m=+1088.375956158" lastFinishedPulling="2026-01-29 13:34:21.495249903 +0000 UTC m=+1100.256510179" observedRunningTime="2026-01-29 13:34:22.660774808 +0000 UTC m=+1101.422035074" watchObservedRunningTime="2026-01-29 13:34:22.662993562 +0000 UTC m=+1101.424253838"
Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.699083 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d" podStartSLOduration=3.45344259 podStartE2EDuration="14.699059649s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.249631064 +0000 UTC m=+1089.010891340" lastFinishedPulling="2026-01-29 13:34:21.495248113 +0000 UTC m=+1100.256508399" observedRunningTime="2026-01-29 13:34:22.696955988 +0000 UTC m=+1101.458216264" watchObservedRunningTime="2026-01-29 13:34:22.699059649 +0000 UTC m=+1101.460319925"
Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.807734 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc" podStartSLOduration=3.554034561 podStartE2EDuration="14.807707532s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.21541272 +0000 UTC m=+1088.976672996" lastFinishedPulling="2026-01-29 13:34:21.469085691 +0000 UTC m=+1100.230345967" observedRunningTime="2026-01-29 13:34:22.782122906 +0000 UTC m=+1101.543383182" watchObservedRunningTime="2026-01-29 13:34:22.807707532 +0000 UTC m=+1101.568967798"
Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.807896 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj" podStartSLOduration=3.5508220379999997 podStartE2EDuration="14.807892517s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.232250214 +0000 UTC m=+1088.993510490" lastFinishedPulling="2026-01-29 13:34:21.489320673 +0000 UTC m=+1100.250580969" observedRunningTime="2026-01-29 13:34:22.744851085 +0000 UTC m=+1101.506111361" watchObservedRunningTime="2026-01-29 13:34:22.807892517 +0000 UTC m=+1101.569152793"
Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.817923 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt" podStartSLOduration=3.520661892 podStartE2EDuration="14.817897835s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.248868782 +0000 UTC m=+1089.010129058" lastFinishedPulling="2026-01-29 13:34:21.546104735 +0000 UTC m=+1100.307365001" observedRunningTime="2026-01-29 13:34:22.814369283 +0000 UTC m=+1101.575629559" watchObservedRunningTime="2026-01-29 13:34:22.817897835 +0000 UTC m=+1101.579158111"
Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.837271 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h" podStartSLOduration=3.534218982 podStartE2EDuration="14.837247481s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.205124425 +0000 UTC m=+1088.966384701" lastFinishedPulling="2026-01-29 13:34:21.508152924 +0000 UTC m=+1100.269413200" observedRunningTime="2026-01-29 13:34:22.834802931 +0000 UTC m=+1101.596063207" watchObservedRunningTime="2026-01-29 13:34:22.837247481 +0000 UTC m=+1101.598507757"
Jan 29 13:34:22 crc kubenswrapper[4787]: I0129 13:34:22.871035 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m" podStartSLOduration=3.825636869 podStartE2EDuration="14.871007832s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.500761293 +0000 UTC m=+1089.262021569" lastFinishedPulling="2026-01-29 13:34:21.546132236 +0000 UTC m=+1100.307392532" observedRunningTime="2026-01-29 13:34:22.86296178 +0000 UTC m=+1101.624222056" watchObservedRunningTime="2026-01-29 13:34:22.871007832 +0000 UTC m=+1101.632268108"
Jan 29 13:34:23 crc kubenswrapper[4787]: I0129 13:34:23.386316 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc"
Jan 29 13:34:24 crc kubenswrapper[4787]: I0129 13:34:24.297483 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:24 crc kubenswrapper[4787]: I0129 13:34:24.307287 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c1d33841-e546-4927-a97a-ee8e6eee6765-cert\") pod \"infra-operator-controller-manager-79955696d6-cktt7\" (UID: \"c1d33841-e546-4927-a97a-ee8e6eee6765\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:24 crc kubenswrapper[4787]: I0129 13:34:24.502064 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"
Jan 29 13:34:24 crc kubenswrapper[4787]: E0129 13:34:24.502609 4787 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 29 13:34:24 crc kubenswrapper[4787]: E0129 13:34:24.502734 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert podName:6327fc99-6096-4780-8d8d-11d454f09e83 nodeName:}" failed. No retries permitted until 2026-01-29 13:34:40.502699186 +0000 UTC m=+1119.263959502 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert") pod "openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" (UID: "6327fc99-6096-4780-8d8d-11d454f09e83") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 29 13:34:24 crc kubenswrapper[4787]: I0129 13:34:24.542614 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:24 crc kubenswrapper[4787]: I0129 13:34:24.913962 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:24 crc kubenswrapper[4787]: I0129 13:34:24.914573 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:24 crc kubenswrapper[4787]: E0129 13:34:24.914213 4787 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 29 13:34:24 crc kubenswrapper[4787]: E0129 13:34:24.914997 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:40.914964077 +0000 UTC m=+1119.676224393 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "metrics-server-cert" not found
Jan 29 13:34:24 crc kubenswrapper[4787]: E0129 13:34:24.914882 4787 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 29 13:34:24 crc kubenswrapper[4787]: E0129 13:34:24.915673 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs podName:c77e73c8-e6c7-4ae4-be36-4ef845996f9c nodeName:}" failed. No retries permitted until 2026-01-29 13:34:40.915563354 +0000 UTC m=+1119.676823640 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs") pod "openstack-operator-controller-manager-6b6f655c79-smwj6" (UID: "c77e73c8-e6c7-4ae4-be36-4ef845996f9c") : secret "webhook-server-cert" not found
Jan 29 13:34:25 crc kubenswrapper[4787]: I0129 13:34:25.053767 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb" podStartSLOduration=5.818672761 podStartE2EDuration="17.053743697s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.25610221 +0000 UTC m=+1089.017362486" lastFinishedPulling="2026-01-29 13:34:21.491173146 +0000 UTC m=+1100.252433422" observedRunningTime="2026-01-29 13:34:22.906634496 +0000 UTC m=+1101.667894772" watchObservedRunningTime="2026-01-29 13:34:25.053743697 +0000 UTC m=+1103.815003983"
Jan 29 13:34:25 crc kubenswrapper[4787]: I0129 13:34:25.054503 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"]
Jan 29 13:34:25 crc kubenswrapper[4787]: I0129 13:34:25.410621 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7" event={"ID":"c1d33841-e546-4927-a97a-ee8e6eee6765","Type":"ContainerStarted","Data":"77d34c0fbe9e0eff252b8a0cb7d60409a10fa15e5430708c110735db388515b4"}
Jan 29 13:34:27 crc kubenswrapper[4787]: I0129 13:34:27.438850 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" event={"ID":"1aac5893-712c-4305-be5c-058309de4369","Type":"ContainerStarted","Data":"03656628c9fd024f30430aab1b9b88cdf913d523f1d0b59cf7a4b3b05af558cb"}
Jan 29 13:34:27 crc kubenswrapper[4787]: I0129 13:34:27.439497 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z"
Jan 29 13:34:27 crc kubenswrapper[4787]: I0129 13:34:27.440679 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" event={"ID":"13817508-b3e5-4f29-94d9-84fc3192d6e6","Type":"ContainerStarted","Data":"8fdc1cc055342a012ffe085fd23707664dbb2ca75f4373ff4f5bcb81855b87a5"}
Jan 29 13:34:27 crc kubenswrapper[4787]: I0129 13:34:27.441113 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2"
Jan 29 13:34:27 crc kubenswrapper[4787]: I0129 13:34:27.460788 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z" podStartSLOduration=3.447749904 podStartE2EDuration="19.460735688s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.547372543 +0000 UTC m=+1089.308632809" lastFinishedPulling="2026-01-29 13:34:26.560358307 +0000 UTC m=+1105.321618593" observedRunningTime="2026-01-29 13:34:27.45278231 +0000 UTC m=+1106.214042596" watchObservedRunningTime="2026-01-29 13:34:27.460735688 +0000 UTC m=+1106.221995964"
Jan 29 13:34:27 crc kubenswrapper[4787]: I0129 13:34:27.474694 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2" podStartSLOduration=3.191002424 podStartE2EDuration="19.474664409s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.28255681 +0000 UTC m=+1089.043817086" lastFinishedPulling="2026-01-29 13:34:26.566218795 +0000 UTC m=+1105.327479071" observedRunningTime="2026-01-29 13:34:27.471530699 +0000 UTC m=+1106.232790975" watchObservedRunningTime="2026-01-29 13:34:27.474664409 +0000 UTC m=+1106.235924685"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.394358 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.394871 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.447294 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-mgb4d"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.474869 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-77c5v"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.546498 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-p7szc"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.723783 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-mbqml"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.724981 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-jsfxt"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.725053 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-6gl4h"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.790023 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-gjzmj"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.790976 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-pjlwc"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.893763 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-wfpg9"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.984811 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-fgs9r"
Jan 29 13:34:28 crc kubenswrapper[4787]: I0129 13:34:28.997145 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-dqzfb"
Jan 29 13:34:29 crc kubenswrapper[4787]: I0129 13:34:29.129328 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-tdkqd"
Jan 29 13:34:29 crc kubenswrapper[4787]: I0129 13:34:29.459795 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-glb7m"
Jan 29 13:34:30 crc kubenswrapper[4787]: I0129 13:34:30.477753 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7" event={"ID":"c1d33841-e546-4927-a97a-ee8e6eee6765","Type":"ContainerStarted","Data":"3979e605fe482661f1b6ade10c0936ba521d24b8f8f56a35e8b69984ff92e22f"}
Jan 29 13:34:30 crc kubenswrapper[4787]: I0129 13:34:30.479224 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:30 crc kubenswrapper[4787]: I0129 13:34:30.482582 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" event={"ID":"50979af0-52a2-45bd-b6af-22e22daeacee","Type":"ContainerStarted","Data":"a224b5558383d7ca5f8e3b265ced20dd58351963d93cbdbae0f40e6e24e90d9b"}
Jan 29 13:34:30 crc kubenswrapper[4787]: I0129 13:34:30.483189 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl"
Jan 29 13:34:30 crc kubenswrapper[4787]: I0129 13:34:30.509554 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7" podStartSLOduration=18.252324782 podStartE2EDuration="22.509528831s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:25.076171991 +0000 UTC m=+1103.837432267" lastFinishedPulling="2026-01-29 13:34:29.33337604 +0000 UTC m=+1108.094636316" observedRunningTime="2026-01-29 13:34:30.501182811 +0000 UTC m=+1109.262443097" watchObservedRunningTime="2026-01-29 13:34:30.509528831 +0000 UTC m=+1109.270789107"
Jan 29 13:34:30 crc kubenswrapper[4787]: I0129 13:34:30.518380 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl" podStartSLOduration=3.706483244 podStartE2EDuration="22.518352364s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.529133809 +0000 UTC m=+1089.290394085" lastFinishedPulling="2026-01-29 13:34:29.341002929 +0000 UTC m=+1108.102263205" observedRunningTime="2026-01-29 13:34:30.515381429 +0000 UTC m=+1109.276641705" watchObservedRunningTime="2026-01-29 13:34:30.518352364 +0000 UTC m=+1109.279612640"
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.519952 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x" event={"ID":"95712c95-56cf-4b2a-9590-fd82b55811c9","Type":"ContainerStarted","Data":"a68c4ea4961d37822f7aa4165e9fc865b07e4f54f7d8f1a78d7c2f5e33c06531"}
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.521182 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x"
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.522685 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" event={"ID":"3203adf3-229d-4717-95c2-d0dd83d6909b","Type":"ContainerStarted","Data":"8ca5035218d33eb938effc10d6a52ead2a2d9d909a5096696e30f631123b47e0"}
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.523866 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96"
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.527248 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" event={"ID":"9046af2e-08a5-402f-97ee-5946a966b8f7","Type":"ContainerStarted","Data":"ffd1cc40268cf348da0640ed87cb24205fba348f857a3af5d08b6336f03fed64"}
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.531495 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" event={"ID":"79858873-af90-4279-82dd-ff3a996bcb30","Type":"ContainerStarted","Data":"be86046b0ee957ffe37c60eb4f437745e8ea954e01c29ac913f3f3c41e84e86c"}
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.532717 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62"
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.551205 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x" podStartSLOduration=3.720563097 podStartE2EDuration="26.551177882s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.538751175 +0000 UTC m=+1089.300011451" lastFinishedPulling="2026-01-29 13:34:33.36936596 +0000 UTC m=+1112.130626236" observedRunningTime="2026-01-29 13:34:34.546986201 +0000 UTC m=+1113.308246487" watchObservedRunningTime="2026-01-29 13:34:34.551177882 +0000 UTC m=+1113.312438168"
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.551811 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79955696d6-cktt7"
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.575643 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-wk5tx" podStartSLOduration=3.6105181440000003 podStartE2EDuration="26.575623855s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.50101141 +0000 UTC m=+1089.262271686" lastFinishedPulling="2026-01-29 13:34:33.466117111 +0000 UTC m=+1112.227377397" observedRunningTime="2026-01-29 13:34:34.57022418 +0000 UTC m=+1113.331484466" watchObservedRunningTime="2026-01-29 13:34:34.575623855 +0000 UTC m=+1113.336884141"
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.604407 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96" podStartSLOduration=3.781689504 podStartE2EDuration="26.604381621s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.545810508 +0000 UTC m=+1089.307070784" lastFinishedPulling="2026-01-29 13:34:33.368502465 +0000 UTC m=+1112.129762901" observedRunningTime="2026-01-29 13:34:34.602507737 +0000 UTC m=+1113.363768053" watchObservedRunningTime="2026-01-29 13:34:34.604381621 +0000 UTC m=+1113.365641907"
Jan 29 13:34:34 crc kubenswrapper[4787]: I0129 13:34:34.628868 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62" podStartSLOduration=3.728258828 podStartE2EDuration="26.628840114s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:10.536069978 +0000 UTC m=+1089.297330254" lastFinishedPulling="2026-01-29 13:34:33.436651234 +0000 UTC m=+1112.197911540" observedRunningTime="2026-01-29 13:34:34.62277344 +0000 UTC m=+1113.384033726" watchObservedRunningTime="2026-01-29 13:34:34.628840114 +0000 UTC m=+1113.390100400"
Jan 29 13:34:38 crc kubenswrapper[4787]: I0129 13:34:38.894092 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-h4m5x"
Jan 29 13:34:39 crc kubenswrapper[4787]: I0129 13:34:39.038314 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-dlk2z"
Jan 29 13:34:39 crc kubenswrapper[4787]: I0129 13:34:39.080123 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-7p9r2"
Jan 29 13:34:39 crc kubenswrapper[4787]: I0129 13:34:39.129636 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-jn5rl"
Jan 29 13:34:39 crc kubenswrapper[4787]: I0129 13:34:39.150430 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-dsk62"
Jan 29 13:34:39 crc kubenswrapper[4787]: I0129 13:34:39.494234 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-564965969-57x96"
Jan 29 13:34:40 crc kubenswrapper[4787]: I0129 13:34:40.560245 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"
Jan 29 13:34:40 crc kubenswrapper[4787]: I0129 13:34:40.573342 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6327fc99-6096-4780-8d8d-11d454f09e83-cert\") pod \"openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6\" (UID: \"6327fc99-6096-4780-8d8d-11d454f09e83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"
Jan 29 13:34:40 crc kubenswrapper[4787]: I0129 13:34:40.847049 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"
Jan 29 13:34:40 crc kubenswrapper[4787]: I0129 13:34:40.968526 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:40 crc kubenswrapper[4787]: I0129 13:34:40.968590 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:40 crc kubenswrapper[4787]: I0129 13:34:40.981222 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-webhook-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:40 crc kubenswrapper[4787]: I0129 13:34:40.983794 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c77e73c8-e6c7-4ae4-be36-4ef845996f9c-metrics-certs\") pod \"openstack-operator-controller-manager-6b6f655c79-smwj6\" (UID: \"c77e73c8-e6c7-4ae4-be36-4ef845996f9c\") " pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:40 crc kubenswrapper[4787]: I0129 13:34:40.998934 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:41 crc kubenswrapper[4787]: I0129 13:34:41.193867 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"]
Jan 29 13:34:41 crc kubenswrapper[4787]: I0129 13:34:41.211702 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 13:34:41 crc kubenswrapper[4787]: I0129 13:34:41.528018 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"]
Jan 29 13:34:41 crc kubenswrapper[4787]: W0129 13:34:41.529977 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc77e73c8_e6c7_4ae4_be36_4ef845996f9c.slice/crio-7386d68493b8401ee29e72e283a3a597b2fc5ead781f84fa55e01e09a02ba6c5 WatchSource:0}: Error finding container 7386d68493b8401ee29e72e283a3a597b2fc5ead781f84fa55e01e09a02ba6c5: Status 404 returned error can't find the container with id 7386d68493b8401ee29e72e283a3a597b2fc5ead781f84fa55e01e09a02ba6c5
Jan 29 13:34:41 crc kubenswrapper[4787]: I0129 13:34:41.599205 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" event={"ID":"c77e73c8-e6c7-4ae4-be36-4ef845996f9c","Type":"ContainerStarted","Data":"7386d68493b8401ee29e72e283a3a597b2fc5ead781f84fa55e01e09a02ba6c5"}
Jan 29 13:34:41 crc kubenswrapper[4787]: I0129 13:34:41.601814 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" event={"ID":"6327fc99-6096-4780-8d8d-11d454f09e83","Type":"ContainerStarted","Data":"48625e13436e480242feb516ee59bee7e956f7aa6424fb4f04a369b16a8e2721"}
Jan 29 13:34:42 crc kubenswrapper[4787]: I0129 13:34:42.615324 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" event={"ID":"c77e73c8-e6c7-4ae4-be36-4ef845996f9c","Type":"ContainerStarted","Data":"0d7e513ba5a0aabc7c234f33af0e2b479850d21f6f4f5b25362de7d182c16cf9"}
Jan 29 13:34:42 crc kubenswrapper[4787]: I0129 13:34:42.615858 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:42 crc kubenswrapper[4787]: I0129 13:34:42.645820 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6" podStartSLOduration=34.645793792 podStartE2EDuration="34.645793792s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:34:42.642080126 +0000 UTC m=+1121.403340412" watchObservedRunningTime="2026-01-29 13:34:42.645793792 +0000 UTC m=+1121.407054088"
Jan 29 13:34:45 crc kubenswrapper[4787]: I0129 13:34:45.646624 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" event={"ID":"6327fc99-6096-4780-8d8d-11d454f09e83","Type":"ContainerStarted","Data":"2640eeeda704da6c15250c6fcd836e07ec583ba470ffc16da05c34941f34eaea"}
Jan 29 13:34:45 crc kubenswrapper[4787]: I0129 13:34:45.648932 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"
Jan 29 13:34:45 crc kubenswrapper[4787]: I0129 13:34:45.695763 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6" podStartSLOduration=34.174510594 podStartE2EDuration="37.695734446s" podCreationTimestamp="2026-01-29 13:34:08 +0000 UTC" firstStartedPulling="2026-01-29 13:34:41.211149522 +0000 UTC m=+1119.972409808" lastFinishedPulling="2026-01-29 13:34:44.732373384 +0000 UTC m=+1123.493633660" observedRunningTime="2026-01-29 13:34:45.688067106 +0000 UTC m=+1124.449327422" watchObservedRunningTime="2026-01-29 13:34:45.695734446 +0000 UTC m=+1124.456994762"
Jan 29 13:34:50 crc kubenswrapper[4787]: I0129 13:34:50.856705 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6"
Jan 29 13:34:51 crc kubenswrapper[4787]: I0129 13:34:51.006071 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6b6f655c79-smwj6"
Jan 29 13:34:58 crc kubenswrapper[4787]: I0129 13:34:58.394985 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:34:58 crc kubenswrapper[4787]: I0129 13:34:58.395808 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:34:58 crc kubenswrapper[4787]: I0129 13:34:58.395876 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn"
Jan 29 13:34:58 crc kubenswrapper[4787]: I0129 13:34:58.396758 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"df9e8740bf151a75c689d168b226a7f5a7159a6e915923c5f7df0f22fffdf98a"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 13:34:58 crc kubenswrapper[4787]: I0129 13:34:58.396864 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://df9e8740bf151a75c689d168b226a7f5a7159a6e915923c5f7df0f22fffdf98a" gracePeriod=600
Jan 29 13:34:58 crc kubenswrapper[4787]: I0129 13:34:58.783382 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="df9e8740bf151a75c689d168b226a7f5a7159a6e915923c5f7df0f22fffdf98a" exitCode=0
Jan 29 13:34:58 crc kubenswrapper[4787]: I0129 13:34:58.783685 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"df9e8740bf151a75c689d168b226a7f5a7159a6e915923c5f7df0f22fffdf98a"}
Jan 29 13:34:58 crc kubenswrapper[4787]: I0129 13:34:58.783879 4787 scope.go:117] "RemoveContainer" containerID="2753a5187d40800d90e7784477132e3d3982abbaf428dff98dbd39ac66898a8b"
Jan 29 13:34:59 crc kubenswrapper[4787]: I0129 13:34:59.802687 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"845d9853bc8431c6707c4ecc6659b35b630563236beea9cc4f52a93c1c065e94"}
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.527902 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"]
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.529800 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.535980 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.536141 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.536255 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-jg4nt"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.536362 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.545850 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"]
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.606120 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-d8hqn"]
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.607543 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.609639 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.625881 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-d8hqn"]
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.716173 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxsq4\" (UniqueName: \"kubernetes.io/projected/8d722581-23f4-4525-819c-6670d7990f2f-kube-api-access-rxsq4\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.716253 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-config\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.716291 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc5bq\" (UniqueName: \"kubernetes.io/projected/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-kube-api-access-sc5bq\") pod \"dnsmasq-dns-84bb9d8bd9-mqc4b\" (UID: \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.716337 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-dns-svc\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.716358 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-config\") pod \"dnsmasq-dns-84bb9d8bd9-mqc4b\" (UID: \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.817770 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-config\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.817817 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc5bq\" (UniqueName: \"kubernetes.io/projected/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-kube-api-access-sc5bq\") pod \"dnsmasq-dns-84bb9d8bd9-mqc4b\" (UID: \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.817869 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-dns-svc\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.817887 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-config\") pod \"dnsmasq-dns-84bb9d8bd9-mqc4b\" (UID: \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.817933 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxsq4\" (UniqueName: \"kubernetes.io/projected/8d722581-23f4-4525-819c-6670d7990f2f-kube-api-access-rxsq4\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.819063 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-dns-svc\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.819212 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-config\") pod \"dnsmasq-dns-84bb9d8bd9-mqc4b\" (UID: \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.819944 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-config\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.840663 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxsq4\" (UniqueName: \"kubernetes.io/projected/8d722581-23f4-4525-819c-6670d7990f2f-kube-api-access-rxsq4\") pod \"dnsmasq-dns-5f854695bc-d8hqn\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.851808 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc5bq\" (UniqueName: \"kubernetes.io/projected/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-kube-api-access-sc5bq\") pod \"dnsmasq-dns-84bb9d8bd9-mqc4b\" (UID: \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"
Jan 29 13:35:05 crc kubenswrapper[4787]: I0129 13:35:05.925159 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-d8hqn"
Jan 29 13:35:06 crc kubenswrapper[4787]: I0129 13:35:06.150850 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"
Jan 29 13:35:06 crc kubenswrapper[4787]: I0129 13:35:06.164205 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-d8hqn"]
Jan 29 13:35:06 crc kubenswrapper[4787]: W0129 13:35:06.177645 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d722581_23f4_4525_819c_6670d7990f2f.slice/crio-b4f7a3781a1cca7a1c2f80eddbd7f68694c26d5c1934cd4618c05904c17220c7 WatchSource:0}: Error finding container b4f7a3781a1cca7a1c2f80eddbd7f68694c26d5c1934cd4618c05904c17220c7: Status 404 returned error can't find the container with id b4f7a3781a1cca7a1c2f80eddbd7f68694c26d5c1934cd4618c05904c17220c7
Jan 29 13:35:06 crc kubenswrapper[4787]: I0129 13:35:06.768751 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"]
Jan 29 13:35:06 crc kubenswrapper[4787]: I0129 13:35:06.859345 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-d8hqn" event={"ID":"8d722581-23f4-4525-819c-6670d7990f2f","Type":"ContainerStarted","Data":"b4f7a3781a1cca7a1c2f80eddbd7f68694c26d5c1934cd4618c05904c17220c7"}
Jan 29 13:35:06 crc kubenswrapper[4787]: I0129 13:35:06.860851 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b" event={"ID":"acec1ff8-f4ae-4575-ba00-ff8c2ee48005","Type":"ContainerStarted","Data":"895541f8d786a448786b4e814c5b68b70bcad5172924f96cde40851755eba5e8"}
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.384053 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-d8hqn"]
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.417931 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-vjdqz"]
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.419004 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.438821 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-vjdqz"]
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.478514 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dnqr\" (UniqueName: \"kubernetes.io/projected/8703a574-800c-4f20-90d3-54027deb24a5-kube-api-access-2dnqr\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.478591 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.478635 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-config\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.579120 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dnqr\" (UniqueName: \"kubernetes.io/projected/8703a574-800c-4f20-90d3-54027deb24a5-kube-api-access-2dnqr\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.579187 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.579234 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-config\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.580139 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-config\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.580959 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.626598 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dnqr\" (UniqueName: \"kubernetes.io/projected/8703a574-800c-4f20-90d3-54027deb24a5-kube-api-access-2dnqr\") pod \"dnsmasq-dns-744ffd65bc-vjdqz\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.746281 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.895590 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"]
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.915267 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-qg99s"]
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.916299 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:08 crc kubenswrapper[4787]: I0129 13:35:08.939907 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-qg99s"]
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.087262 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-config\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.087693 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hzsk\" (UniqueName: \"kubernetes.io/projected/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-kube-api-access-9hzsk\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.087721 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-dns-svc\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.189006 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-vjdqz"]
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.189476 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-config\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.189556 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hzsk\" (UniqueName: \"kubernetes.io/projected/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-kube-api-access-9hzsk\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.189588 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-dns-svc\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.191000 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-config\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.191473 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-dns-svc\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.220661 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hzsk\" (UniqueName: \"kubernetes.io/projected/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-kube-api-access-9hzsk\") pod \"dnsmasq-dns-95f5f6995-qg99s\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.253542 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-qg99s"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.646442 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.649154 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.651584 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.651886 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-kxjj4"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.652022 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.652193 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.652296 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.652392 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.652538 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.653492 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.754177 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-qg99s"]
Jan 29 13:35:09 crc kubenswrapper[4787]: W0129 13:35:09.756000 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a052e21_d0ff_4b83_9d37_77ba7bbb808f.slice/crio-6100a57bb8ed9d69cedff9dde82722d262dddcfd349276175b370fda19884b40 WatchSource:0}: Error finding container 6100a57bb8ed9d69cedff9dde82722d262dddcfd349276175b370fda19884b40: Status 404 returned error can't find the container with id 6100a57bb8ed9d69cedff9dde82722d262dddcfd349276175b370fda19884b40
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798517 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798561 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798588 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798613 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798640 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798659 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798678 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798717 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798744 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798766 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.798781 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c8lf\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-kube-api-access-2c8lf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900365 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900434 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900489 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900523 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900559 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900582 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900605 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900657 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900691 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900721 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.900743 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c8lf\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-kube-api-access-2c8lf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.901834 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.902084 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.902644 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.903874 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.904209 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.907445 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.907731 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.908050 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.912013 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz" event={"ID":"8703a574-800c-4f20-90d3-54027deb24a5","Type":"ContainerStarted","Data":"4b57774421436b3a0d1241ea0d8ad4d15312e93c1bd10b4539a7922f3f247a13"}
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.912084 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.918671 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-qg99s" event={"ID":"3a052e21-d0ff-4b83-9d37-77ba7bbb808f","Type":"ContainerStarted","Data":"6100a57bb8ed9d69cedff9dde82722d262dddcfd349276175b370fda19884b40"}
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.920113 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.920651 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c8lf\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-kube-api-access-2c8lf\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.925027 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") " pod="openstack/rabbitmq-server-0"
Jan 29 13:35:09 crc kubenswrapper[4787]: I0129 13:35:09.975881 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.068384 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.071409 4787 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.073061 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.073320 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.076033 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.076762 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.076924 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.077013 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.077157 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-pb7c4" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.078009 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.213565 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.213609 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.213642 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.213847 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mfc5\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-kube-api-access-7mfc5\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.213877 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.213916 4787 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.214008 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.214079 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.214225 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.214262 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.214289 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.313891 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322544 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322667 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mfc5\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-kube-api-access-7mfc5\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322697 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " 
pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322725 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322744 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322770 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322847 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322878 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322901 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322938 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.322956 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.323571 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.323970 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.324482 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.325506 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.325811 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.326910 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.328934 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.329309 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.329971 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.331611 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.344889 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mfc5\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-kube-api-access-7mfc5\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.354137 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.419750 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.862936 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 13:35:10 crc kubenswrapper[4787]: W0129 13:35:10.875680 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6285155e_2d1b_4c6f_be33_5f2681a7b5e0.slice/crio-a477b250288ea993e4f89842a04898f7f4cf6530724bf70b13e4d844bd478e04 WatchSource:0}: Error finding container a477b250288ea993e4f89842a04898f7f4cf6530724bf70b13e4d844bd478e04: Status 404 returned error can't find the container with id a477b250288ea993e4f89842a04898f7f4cf6530724bf70b13e4d844bd478e04 Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.943648 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6285155e-2d1b-4c6f-be33-5f2681a7b5e0","Type":"ContainerStarted","Data":"a477b250288ea993e4f89842a04898f7f4cf6530724bf70b13e4d844bd478e04"} Jan 29 13:35:10 crc kubenswrapper[4787]: I0129 13:35:10.945125 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5","Type":"ContainerStarted","Data":"d3be32187af9f191f0daadc8b03aa3cf2809d35ae4ae9654f8696fe9eb1589dd"} Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.231429 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.232561 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.236586 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.239956 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-tsn66" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.240180 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.240394 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.249842 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.265035 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.346047 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.346089 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.346125 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-kolla-config\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.346154 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.346175 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.346204 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.346221 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-l944v\" (UniqueName: \"kubernetes.io/projected/7b3f405a-2fa1-4afe-8364-60489fc271ca-kube-api-access-l944v\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.346283 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-default\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.447426 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-default\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.447515 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.447544 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.447577 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-kolla-config\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.447605 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.447623 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.447652 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.447667 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l944v\" (UniqueName: \"kubernetes.io/projected/7b3f405a-2fa1-4afe-8364-60489fc271ca-kube-api-access-l944v\") pod \"openstack-galera-0\" (UID: 
\"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.448285 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.449497 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-default\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.449845 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.450372 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-kolla-config\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.454477 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.461568 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.464150 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.465138 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l944v\" (UniqueName: \"kubernetes.io/projected/7b3f405a-2fa1-4afe-8364-60489fc271ca-kube-api-access-l944v\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.480281 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " pod="openstack/openstack-galera-0" Jan 29 13:35:11 crc kubenswrapper[4787]: I0129 13:35:11.551385 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.193978 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 13:35:12 crc kubenswrapper[4787]: W0129 13:35:12.209623 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b3f405a_2fa1_4afe_8364_60489fc271ca.slice/crio-3031543447d738394c139cd3721f31533505ba3717a0f738e40b447dd668095d WatchSource:0}: Error finding container 3031543447d738394c139cd3721f31533505ba3717a0f738e40b447dd668095d: Status 404 returned error can't find the container with id 3031543447d738394c139cd3721f31533505ba3717a0f738e40b447dd668095d Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.708035 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.709813 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.712363 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.712865 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.712367 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-jlx4x" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.713787 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.734169 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.771719 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.771796 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.771878 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.771928 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " 
pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.771953 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.772008 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.772043 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l65h\" (UniqueName: \"kubernetes.io/projected/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kube-api-access-4l65h\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.772094 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.873137 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l65h\" (UniqueName: \"kubernetes.io/projected/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kube-api-access-4l65h\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.873191 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.873219 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.873245 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.873289 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" 
Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.873377 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.873407 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.873439 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.875494 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.876213 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.879322 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.879381 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.880646 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.889556 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.898129 4787 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.900248 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l65h\" (UniqueName: \"kubernetes.io/projected/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kube-api-access-4l65h\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.925149 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.944379 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.945607 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.949041 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.949285 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-qrxfb" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.949524 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.955398 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.976962 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-kolla-config\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.977011 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-config-data\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.977040 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.977102 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qjnt\" (UniqueName: \"kubernetes.io/projected/d95df36d-a737-4136-8921-01fe4e028add-kube-api-access-2qjnt\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:12 crc kubenswrapper[4787]: I0129 13:35:12.977153 4787 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.022396 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b3f405a-2fa1-4afe-8364-60489fc271ca","Type":"ContainerStarted","Data":"3031543447d738394c139cd3721f31533505ba3717a0f738e40b447dd668095d"} Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.048764 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.078880 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-kolla-config\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.078941 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-config-data\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.078974 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.079021 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qjnt\" (UniqueName: \"kubernetes.io/projected/d95df36d-a737-4136-8921-01fe4e028add-kube-api-access-2qjnt\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.079075 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.079801 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-config-data\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.080294 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-kolla-config\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.117189 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-combined-ca-bundle\") pod \"memcached-0\" (UID: 
\"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.125315 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qjnt\" (UniqueName: \"kubernetes.io/projected/d95df36d-a737-4136-8921-01fe4e028add-kube-api-access-2qjnt\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.129141 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") " pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.318410 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.737211 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 13:35:13 crc kubenswrapper[4787]: I0129 13:35:13.882749 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 29 13:35:14 crc kubenswrapper[4787]: I0129 13:35:14.548739 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:35:14 crc kubenswrapper[4787]: I0129 13:35:14.551054 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 13:35:14 crc kubenswrapper[4787]: I0129 13:35:14.560449 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-lvk9x" Jan 29 13:35:14 crc kubenswrapper[4787]: I0129 13:35:14.607432 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:35:14 crc kubenswrapper[4787]: I0129 13:35:14.619391 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5h6d\" (UniqueName: \"kubernetes.io/projected/baf39877-8374-4f5a-91a6-60b55b5d6514-kube-api-access-r5h6d\") pod \"kube-state-metrics-0\" (UID: \"baf39877-8374-4f5a-91a6-60b55b5d6514\") " pod="openstack/kube-state-metrics-0" Jan 29 13:35:14 crc kubenswrapper[4787]: I0129 13:35:14.720442 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5h6d\" (UniqueName: \"kubernetes.io/projected/baf39877-8374-4f5a-91a6-60b55b5d6514-kube-api-access-r5h6d\") pod \"kube-state-metrics-0\" (UID: \"baf39877-8374-4f5a-91a6-60b55b5d6514\") " pod="openstack/kube-state-metrics-0" Jan 29 13:35:14 crc kubenswrapper[4787]: I0129 13:35:14.757627 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5h6d\" (UniqueName: \"kubernetes.io/projected/baf39877-8374-4f5a-91a6-60b55b5d6514-kube-api-access-r5h6d\") pod \"kube-state-metrics-0\" (UID: \"baf39877-8374-4f5a-91a6-60b55b5d6514\") " pod="openstack/kube-state-metrics-0" Jan 29 13:35:14 crc kubenswrapper[4787]: I0129 13:35:14.887405 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.122344 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"bf2e6240-bb2e-45fc-b33f-3b54a718f136","Type":"ContainerStarted","Data":"694b58bc8dcf98befd85af5928ae9cc780331b3fd586819fe23110c2a890d891"} Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.123171 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d95df36d-a737-4136-8921-01fe4e028add","Type":"ContainerStarted","Data":"838949beaab6d850d64bfa417ae4b78437f8aff3a1873bad8750a4b6d346a500"} Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.297138 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-hz6gf"] Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.298043 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.303921 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-ml52f" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.304184 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.304833 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.314579 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hz6gf"] Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.331597 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-2xr6j"] Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.333367 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.372885 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-run\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.372951 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/213bfa86-f7a6-48b4-94a0-328352f00e75-scripts\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.372976 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-combined-ca-bundle\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373009 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn6ll\" (UniqueName: \"kubernetes.io/projected/213bfa86-f7a6-48b4-94a0-328352f00e75-kube-api-access-sn6ll\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373029 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-ovn-controller-tls-certs\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373051 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-log-ovn\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373065 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5gnz\" (UniqueName: \"kubernetes.io/projected/383ed8f7-22dd-49b6-a932-6425cc62a6d1-kube-api-access-h5gnz\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373099 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-log\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373121 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-lib\") pod \"ovn-controller-ovs-2xr6j\" (UID: 
\"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373150 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-etc-ovs\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373169 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373200 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run-ovn\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.373213 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/383ed8f7-22dd-49b6-a932-6425cc62a6d1-scripts\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.376045 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2xr6j"] Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.474521 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-log\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475041 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-lib\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475248 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-etc-ovs\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475280 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475378 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/383ed8f7-22dd-49b6-a932-6425cc62a6d1-scripts\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " 
pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475433 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run-ovn\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475481 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-run\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475576 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/213bfa86-f7a6-48b4-94a0-328352f00e75-scripts\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475606 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-combined-ca-bundle\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475658 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn6ll\" (UniqueName: \"kubernetes.io/projected/213bfa86-f7a6-48b4-94a0-328352f00e75-kube-api-access-sn6ll\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475689 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-ovn-controller-tls-certs\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475724 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-log-ovn\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.475745 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5gnz\" (UniqueName: \"kubernetes.io/projected/383ed8f7-22dd-49b6-a932-6425cc62a6d1-kube-api-access-h5gnz\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.476327 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-lib\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.476383 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-etc-ovs\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.476559 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.476803 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-log\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.481407 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/383ed8f7-22dd-49b6-a932-6425cc62a6d1-scripts\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.486383 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-combined-ca-bundle\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.486767 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run-ovn\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.486901 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-run\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.488878 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/213bfa86-f7a6-48b4-94a0-328352f00e75-scripts\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.489089 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-log-ovn\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.500142 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5gnz\" (UniqueName: \"kubernetes.io/projected/383ed8f7-22dd-49b6-a932-6425cc62a6d1-kube-api-access-h5gnz\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.505791 4787 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-ovn-controller-tls-certs\") pod \"ovn-controller-hz6gf\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.511656 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn6ll\" (UniqueName: \"kubernetes.io/projected/213bfa86-f7a6-48b4-94a0-328352f00e75-kube-api-access-sn6ll\") pod \"ovn-controller-ovs-2xr6j\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.634023 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:18 crc kubenswrapper[4787]: I0129 13:35:18.670697 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.188518 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.190159 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.194821 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.196302 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.196518 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-z5k9p" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.197099 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.197559 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.203964 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.293298 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-config\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.293409 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.293464 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8z9m\" (UniqueName: \"kubernetes.io/projected/09f4aba5-9fa2-4e2d-ac39-e62905543d84-kube-api-access-j8z9m\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 
crc kubenswrapper[4787]: I0129 13:35:19.293574 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.293616 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.293663 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.293690 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.293720 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397224 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397298 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8z9m\" (UniqueName: \"kubernetes.io/projected/09f4aba5-9fa2-4e2d-ac39-e62905543d84-kube-api-access-j8z9m\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397360 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397405 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397494 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397529 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397570 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397625 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-config\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.397753 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.398777 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-config\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.398782 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.408192 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.408292 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.412264 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.413218 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.416799 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8z9m\" (UniqueName: \"kubernetes.io/projected/09f4aba5-9fa2-4e2d-ac39-e62905543d84-kube-api-access-j8z9m\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.426252 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:19 crc kubenswrapper[4787]: I0129 13:35:19.513185 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:20 crc kubenswrapper[4787]: I0129 13:35:20.809591 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.534106 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.537826 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.554015 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.563421 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.563764 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-qcggc" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.564620 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.570284 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.732490 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.732603 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-config\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.732690 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: 
\"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.733014 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.733098 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.733166 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glbbf\" (UniqueName: \"kubernetes.io/projected/e91c97aa-9ab9-47e6-9821-22ee20dff312-kube-api-access-glbbf\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.733230 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.733272 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.834858 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glbbf\" (UniqueName: \"kubernetes.io/projected/e91c97aa-9ab9-47e6-9821-22ee20dff312-kube-api-access-glbbf\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.834959 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.834986 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.835045 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.835077 4787 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-config\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.835117 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.835154 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.835171 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.835763 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.836306 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.837265 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-config\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.837423 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.848921 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.854209 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc 
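Each UniqueName above carries its volume plugin in the prefix (kubernetes.io/host-path, configmap, secret, projected, empty-dir, local-volume), and only the local volumes get the extra MountVolume.MountDevice step (operation_generator.go:580) that reveals the backing host directory: /mnt/openstack/pv04 for ovsdbserver-nb-0 earlier, /mnt/openstack/pv10 for ovsdbserver-sb-0 here. A quick tally sketch under the same parsing assumptions as before:

    import re
    from collections import Counter

    UNIQ = re.compile(r'UniqueName: \\"kubernetes\.io/(?P<plugin>[^/\\"]+)')
    DEV = re.compile(r'volume \\"(?P<vol>[^"\\]+)\\".*device mount path \\"(?P<path>[^"\\]+)\\"')

    def summarize_volumes(lines):
        plugins, devices = Counter(), {}
        for line in lines:
            if (u := UNIQ.search(line)):
                plugins[u["plugin"]] += 1
            if (d := DEV.search(line)):
                devices[d["vol"]] = d["path"]  # local PV -> host directory
        return plugins, devices

    # For this excerpt: devices == {"local-storage04-crc": "/mnt/openstack/pv04",
    #                               "local-storage10-crc": "/mnt/openstack/pv10"}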
kubenswrapper[4787]: I0129 13:35:21.864914 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.865410 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glbbf\" (UniqueName: \"kubernetes.io/projected/e91c97aa-9ab9-47e6-9821-22ee20dff312-kube-api-access-glbbf\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.867248 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:21 crc kubenswrapper[4787]: I0129 13:35:21.893990 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:27 crc kubenswrapper[4787]: W0129 13:35:27.503686 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbaf39877_8374_4f5a_91a6_60b55b5d6514.slice/crio-9877b5ffc153f60bcd31450a3631fe086ad55d52c2598b7bb3be1fa68af9fbcf WatchSource:0}: Error finding container 9877b5ffc153f60bcd31450a3631fe086ad55d52c2598b7bb3be1fa68af9fbcf: Status 404 returned error can't find the container with id 9877b5ffc153f60bcd31450a3631fe086ad55d52c2598b7bb3be1fa68af9fbcf Jan 29 13:35:27 crc kubenswrapper[4787]: E0129 13:35:27.536505 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13" Jan 29 13:35:27 crc kubenswrapper[4787]: E0129 13:35:27.536868 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l944v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(7b3f405a-2fa1-4afe-8364-60489fc271ca): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:35:27 crc kubenswrapper[4787]: E0129 13:35:27.538502 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" Jan 29 13:35:28 crc kubenswrapper[4787]: I0129 13:35:28.212347 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"baf39877-8374-4f5a-91a6-60b55b5d6514","Type":"ContainerStarted","Data":"9877b5ffc153f60bcd31450a3631fe086ad55d52c2598b7bb3be1fa68af9fbcf"} Jan 29 13:35:28 crc kubenswrapper[4787]: E0129 13:35:28.215301 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13\\\"\"" pod="openstack/openstack-galera-0" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" Jan 29 13:35:39 crc kubenswrapper[4787]: E0129 13:35:39.532057 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached@sha256:e47191ba776414b781b3e27b856ab45a03b9480c7dc2b1addb939608794882dc" Jan 29 13:35:39 crc 
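openstack-galera-0's mysql-bootstrap shows the full failure shape: the canceled pull is reported once as ErrImagePull, and every retry after that is throttled and reported as ImagePullBackOff ("Back-off pulling image ..."); memcached-0 has just entered the same cycle. Stock kubelet spaces those retries with an exponential back-off, commonly 10s doubling up to a 5-minute cap; that schedule is an assumption from kubelet defaults, not something this log states. A sketch of the spacing it implies:

    # Hypothetical reconstruction of the image-pull back-off, assuming the
    # common kubelet defaults: 10s initial delay, doubling, capped at 300s.
    def backoff_schedule(initial=10.0, cap=300.0, attempts=8):
        delay, total = initial, 0.0
        for attempt in range(1, attempts + 1):
            total += delay
            yield attempt, delay, total
            delay = min(delay * 2, cap)

    for attempt, delay, total in backoff_schedule():
        print(f"retry {attempt}: wait {delay:5.0f}s (cumulative {total:5.0f}s)")
    # retry 1 waits 10s, retry 2 waits 20s, ..., then every retry waits 300s.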
kubenswrapper[4787]: E0129 13:35:39.533070 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached@sha256:e47191ba776414b781b3e27b856ab45a03b9480c7dc2b1addb939608794882dc,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n77h5dch65bh656h5b9hcfh677hf6hf8h668h66dh657h7bh699hdfhb8hc6hb6h97h84h578hbdh54ch597h54dh655h575hb9h54h565h555h655q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2qjnt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(d95df36d-a737-4136-8921-01fe4e028add): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:35:39 crc kubenswrapper[4787]: E0129 13:35:39.536103 4787 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="d95df36d-a737-4136-8921-01fe4e028add" Jan 29 13:35:39 crc kubenswrapper[4787]: I0129 13:35:39.842344 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.306360 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached@sha256:e47191ba776414b781b3e27b856ab45a03b9480c7dc2b1addb939608794882dc\\\"\"" pod="openstack/memcached-0" podUID="d95df36d-a737-4136-8921-01fe4e028add" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.406004 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.406185 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sc5bq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-84bb9d8bd9-mqc4b_openstack(acec1ff8-f4ae-4575-ba00-ff8c2ee48005): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.407908 4787 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b" podUID="acec1ff8-f4ae-4575-ba00-ff8c2ee48005" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.436553 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.436906 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9hzsk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-95f5f6995-qg99s_openstack(3a052e21-d0ff-4b83-9d37-77ba7bbb808f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.438088 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-95f5f6995-qg99s" podUID="3a052e21-d0ff-4b83-9d37-77ba7bbb808f" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.453570 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: 
context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.453708 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rxsq4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5f854695bc-d8hqn_openstack(8d722581-23f4-4525-819c-6670d7990f2f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.454998 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5f854695bc-d8hqn" podUID="8d722581-23f4-4525-819c-6670d7990f2f" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.465561 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.465719 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2dnqr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-744ffd65bc-vjdqz_openstack(8703a574-800c-4f20-90d3-54027deb24a5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:35:40 crc kubenswrapper[4787]: E0129 13:35:40.468363 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz" podUID="8703a574-800c-4f20-90d3-54027deb24a5" Jan 29 13:35:40 crc kubenswrapper[4787]: I0129 13:35:40.654178 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hz6gf"] Jan 29 13:35:40 crc kubenswrapper[4787]: W0129 13:35:40.748037 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod383ed8f7_22dd_49b6_a932_6425cc62a6d1.slice/crio-e1432302026dec057fd0a96fc8b4819324643fd29b0afe8cf0a9bdb83ca793c2 WatchSource:0}: Error finding container e1432302026dec057fd0a96fc8b4819324643fd29b0afe8cf0a9bdb83ca793c2: Status 404 returned error can't find the container with id e1432302026dec057fd0a96fc8b4819324643fd29b0afe8cf0a9bdb83ca793c2 Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.020893 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2xr6j"] Jan 29 
13:35:41 crc kubenswrapper[4787]: W0129 13:35:41.030480 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod213bfa86_f7a6_48b4_94a0_328352f00e75.slice/crio-852487611cc4f8e0e2564c9407ef1ef49a62a00cf0a0904263eef3002495e54a WatchSource:0}: Error finding container 852487611cc4f8e0e2564c9407ef1ef49a62a00cf0a0904263eef3002495e54a: Status 404 returned error can't find the container with id 852487611cc4f8e0e2564c9407ef1ef49a62a00cf0a0904263eef3002495e54a Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.142426 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.312869 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"09f4aba5-9fa2-4e2d-ac39-e62905543d84","Type":"ContainerStarted","Data":"dd14c5768d74958787edaa836d7c2b1953728c25d91145ffdb2c08ce3af53bdf"} Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.317185 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"bf2e6240-bb2e-45fc-b33f-3b54a718f136","Type":"ContainerStarted","Data":"31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1"} Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.318423 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2xr6j" event={"ID":"213bfa86-f7a6-48b4-94a0-328352f00e75","Type":"ContainerStarted","Data":"852487611cc4f8e0e2564c9407ef1ef49a62a00cf0a0904263eef3002495e54a"} Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.319651 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf" event={"ID":"383ed8f7-22dd-49b6-a932-6425cc62a6d1","Type":"ContainerStarted","Data":"e1432302026dec057fd0a96fc8b4819324643fd29b0afe8cf0a9bdb83ca793c2"} Jan 29 13:35:41 crc kubenswrapper[4787]: E0129 13:35:41.322288 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33\\\"\"" pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz" podUID="8703a574-800c-4f20-90d3-54027deb24a5" Jan 29 13:35:41 crc kubenswrapper[4787]: E0129 13:35:41.322342 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33\\\"\"" pod="openstack/dnsmasq-dns-95f5f6995-qg99s" podUID="3a052e21-d0ff-4b83-9d37-77ba7bbb808f" Jan 29 13:35:41 crc kubenswrapper[4787]: W0129 13:35:41.368037 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode91c97aa_9ab9_47e6_9821_22ee20dff312.slice/crio-8cdaa16f73cf5fdfa4959221a0456ebdfb93648708bc35db169fadd941dd3c46 WatchSource:0}: Error finding container 8cdaa16f73cf5fdfa4959221a0456ebdfb93648708bc35db169fadd941dd3c46: Status 404 returned error can't find the container with id 8cdaa16f73cf5fdfa4959221a0456ebdfb93648708bc35db169fadd941dd3c46 Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.674908 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-wpb6r"] Jan 29 13:35:41 crc kubenswrapper[4787]: 
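The burst of SyncLoop (PLEG) entries above is where pod names, pod UIDs and runtime IDs finally meet: each kubelet.go:2453 event ties a pod to the sandbox/container ID the runtime reported (e.g. ovn-controller-hz6gf to e1432302...). A small index builder over those lines (illustrative):

    import re
    from collections import defaultdict

    EVENT = re.compile(
        r'event for pod" pod="(?P<pod>[^"]+)" '
        r'event=\{"ID":"(?P<uid>[^"]+)","Type":"(?P<type>[^"]+)","Data":"(?P<data>[^"]+)"\}'
    )

    def pleg_index(lines):
        by_pod = defaultdict(list)  # (pod, uid) -> [(event type, container id), ...]
        for line in lines:
            if (m := EVENT.search(line)):
                by_pod[(m["pod"], m["uid"])].append((m["type"], m["data"]))
        return by_pod

    # e.g. ("openstack/ovn-controller-hz6gf", "383ed8f7-22dd-49b6-a932-6425cc62a6d1")
    #      -> [("ContainerStarted", "e1432302026dec05...")]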
I0129 13:35:41.675944 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.683796 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.700438 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wpb6r"] Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.768504 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.768565 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msd4x\" (UniqueName: \"kubernetes.io/projected/1a18d2e9-35be-4d8f-9d13-08296cfa2963-kube-api-access-msd4x\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.768595 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-combined-ca-bundle\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.768761 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovn-rundir\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.768825 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a18d2e9-35be-4d8f-9d13-08296cfa2963-config\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.768875 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovs-rundir\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.811366 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-vjdqz"] Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.823059 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7878659675-mnv6q"] Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.826216 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.832998 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7878659675-mnv6q"] Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.833640 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.871118 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovn-rundir\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.871171 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a18d2e9-35be-4d8f-9d13-08296cfa2963-config\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.871199 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovs-rundir\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.871281 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.871307 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msd4x\" (UniqueName: \"kubernetes.io/projected/1a18d2e9-35be-4d8f-9d13-08296cfa2963-kube-api-access-msd4x\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.871348 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-combined-ca-bundle\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.872101 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovs-rundir\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.872134 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovn-rundir\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.872787 4787 
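Each volume of ovn-controller-metrics-wpb6r above passes through the same three reconciler steps: VerifyControllerAttachedVolume, then MountVolume started, and finally MountVolume.SetUp succeeded (the inverse UnmountVolume / TearDown / "Volume detached" sequence appears further down as the old dnsmasq pods are deleted). The Go snippet below is only a schematic walk through that per-volume sequence; the type and messages mirror the log text, not kubelet's actual volumemanager API.

    package main

    import "fmt"

    // Toy model of the per-volume mount sequence visible in the records above.
    type volume struct {
    	name, plugin string
    }

    func main() {
    	pod := "openstack/ovn-controller-metrics-wpb6r"
    	vols := []volume{
    		{"ovn-rundir", "kubernetes.io/host-path"},
    		{"config", "kubernetes.io/configmap"},
    		{"metrics-certs-tls-certs", "kubernetes.io/secret"},
    		{"kube-api-access-msd4x", "kubernetes.io/projected"},
    	}
    	for _, v := range vols {
    		// 1. The reconciler first confirms the volume counts as attached.
    		fmt.Printf("VerifyControllerAttachedVolume started for %q (%s) pod=%s\n", v.name, v.plugin, pod)
    		// 2. It then kicks off the mount operation...
    		fmt.Printf("MountVolume started for %q pod=%s\n", v.name, pod)
    		// 3. ...and the operation generator reports completion.
    		fmt.Printf("MountVolume.SetUp succeeded for %q pod=%s\n", v.name, pod)
    	}
    }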
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a18d2e9-35be-4d8f-9d13-08296cfa2963-config\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.880345 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.881454 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-combined-ca-bundle\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.899042 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msd4x\" (UniqueName: \"kubernetes.io/projected/1a18d2e9-35be-4d8f-9d13-08296cfa2963-kube-api-access-msd4x\") pod \"ovn-controller-metrics-wpb6r\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.975700 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.975774 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-config\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.975847 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9dpn\" (UniqueName: \"kubernetes.io/projected/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-kube-api-access-n9dpn\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.975867 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-dns-svc\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:41 crc kubenswrapper[4787]: I0129 13:35:41.978271 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-qg99s"] Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.004636 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.016502 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-stgl5"] Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.018098 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.020368 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.026691 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-stgl5"] Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.078095 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.078134 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-config\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.078209 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9dpn\" (UniqueName: \"kubernetes.io/projected/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-kube-api-access-n9dpn\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.078231 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-dns-svc\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.079029 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-dns-svc\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.079552 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.080036 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-config\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.106851 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-d8hqn" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.110254 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9dpn\" (UniqueName: \"kubernetes.io/projected/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-kube-api-access-n9dpn\") pod \"dnsmasq-dns-7878659675-mnv6q\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.128030 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.156087 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.178921 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-config\") pod \"8d722581-23f4-4525-819c-6670d7990f2f\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.179056 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxsq4\" (UniqueName: \"kubernetes.io/projected/8d722581-23f4-4525-819c-6670d7990f2f-kube-api-access-rxsq4\") pod \"8d722581-23f4-4525-819c-6670d7990f2f\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.179137 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-dns-svc\") pod \"8d722581-23f4-4525-819c-6670d7990f2f\" (UID: \"8d722581-23f4-4525-819c-6670d7990f2f\") " Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.179333 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-dns-svc\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.179412 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.179439 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-config\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.179505 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddf76\" (UniqueName: \"kubernetes.io/projected/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-kube-api-access-ddf76\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 
13:35:42.179572 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.182945 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-config" (OuterVolumeSpecName: "config") pod "8d722581-23f4-4525-819c-6670d7990f2f" (UID: "8d722581-23f4-4525-819c-6670d7990f2f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.184944 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8d722581-23f4-4525-819c-6670d7990f2f" (UID: "8d722581-23f4-4525-819c-6670d7990f2f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.185402 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d722581-23f4-4525-819c-6670d7990f2f-kube-api-access-rxsq4" (OuterVolumeSpecName: "kube-api-access-rxsq4") pod "8d722581-23f4-4525-819c-6670d7990f2f" (UID: "8d722581-23f4-4525-819c-6670d7990f2f"). InnerVolumeSpecName "kube-api-access-rxsq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.280858 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sc5bq\" (UniqueName: \"kubernetes.io/projected/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-kube-api-access-sc5bq\") pod \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\" (UID: \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\") " Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.280969 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-config\") pod \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\" (UID: \"acec1ff8-f4ae-4575-ba00-ff8c2ee48005\") " Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.281192 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.281231 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-dns-svc\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.281290 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc 
kubenswrapper[4787]: I0129 13:35:42.281309 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-config\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.281346 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddf76\" (UniqueName: \"kubernetes.io/projected/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-kube-api-access-ddf76\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.281394 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxsq4\" (UniqueName: \"kubernetes.io/projected/8d722581-23f4-4525-819c-6670d7990f2f-kube-api-access-rxsq4\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.281407 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.281416 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d722581-23f4-4525-819c-6670d7990f2f-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.282081 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-config" (OuterVolumeSpecName: "config") pod "acec1ff8-f4ae-4575-ba00-ff8c2ee48005" (UID: "acec1ff8-f4ae-4575-ba00-ff8c2ee48005"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.282281 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.282362 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-config\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.282481 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-dns-svc\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.282688 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.294760 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-kube-api-access-sc5bq" (OuterVolumeSpecName: "kube-api-access-sc5bq") pod "acec1ff8-f4ae-4575-ba00-ff8c2ee48005" (UID: "acec1ff8-f4ae-4575-ba00-ff8c2ee48005"). InnerVolumeSpecName "kube-api-access-sc5bq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.297747 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddf76\" (UniqueName: \"kubernetes.io/projected/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-kube-api-access-ddf76\") pod \"dnsmasq-dns-586b989cdc-stgl5\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.327218 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.327684 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-mqc4b" event={"ID":"acec1ff8-f4ae-4575-ba00-ff8c2ee48005","Type":"ContainerDied","Data":"895541f8d786a448786b4e814c5b68b70bcad5172924f96cde40851755eba5e8"} Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.330238 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6285155e-2d1b-4c6f-be33-5f2681a7b5e0","Type":"ContainerStarted","Data":"a6c0ad3143ab1f25e2f8fefaf9710ebb5e0dda180c2744c012d09871106cf7a3"} Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.333954 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5","Type":"ContainerStarted","Data":"fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413"} Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.339260 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-d8hqn" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.339734 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-d8hqn" event={"ID":"8d722581-23f4-4525-819c-6670d7990f2f","Type":"ContainerDied","Data":"b4f7a3781a1cca7a1c2f80eddbd7f68694c26d5c1934cd4618c05904c17220c7"} Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.343258 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e91c97aa-9ab9-47e6-9821-22ee20dff312","Type":"ContainerStarted","Data":"8cdaa16f73cf5fdfa4959221a0456ebdfb93648708bc35db169fadd941dd3c46"} Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.384207 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.384234 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sc5bq\" (UniqueName: \"kubernetes.io/projected/acec1ff8-f4ae-4575-ba00-ff8c2ee48005-kube-api-access-sc5bq\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.410736 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.516199 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-d8hqn"] Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.594770 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-d8hqn"] Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.637065 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"] Jan 29 13:35:42 crc kubenswrapper[4787]: I0129 13:35:42.652104 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-mqc4b"] Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.037803 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wpb6r"] Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.043345 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-stgl5"] Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.048165 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7878659675-mnv6q"] Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.354141 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b3f405a-2fa1-4afe-8364-60489fc271ca","Type":"ContainerStarted","Data":"9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275"} Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.774405 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-qg99s" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.784649 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.955432 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-config\") pod \"8703a574-800c-4f20-90d3-54027deb24a5\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.955484 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-config\") pod \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.955505 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dnqr\" (UniqueName: \"kubernetes.io/projected/8703a574-800c-4f20-90d3-54027deb24a5-kube-api-access-2dnqr\") pod \"8703a574-800c-4f20-90d3-54027deb24a5\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.955538 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hzsk\" (UniqueName: \"kubernetes.io/projected/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-kube-api-access-9hzsk\") pod \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.955610 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-dns-svc\") pod \"8703a574-800c-4f20-90d3-54027deb24a5\" (UID: \"8703a574-800c-4f20-90d3-54027deb24a5\") " Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.955659 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-dns-svc\") pod \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\" (UID: \"3a052e21-d0ff-4b83-9d37-77ba7bbb808f\") " Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.956513 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3a052e21-d0ff-4b83-9d37-77ba7bbb808f" (UID: "3a052e21-d0ff-4b83-9d37-77ba7bbb808f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.956902 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8703a574-800c-4f20-90d3-54027deb24a5" (UID: "8703a574-800c-4f20-90d3-54027deb24a5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.957108 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-config" (OuterVolumeSpecName: "config") pod "8703a574-800c-4f20-90d3-54027deb24a5" (UID: "8703a574-800c-4f20-90d3-54027deb24a5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.957291 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-config" (OuterVolumeSpecName: "config") pod "3a052e21-d0ff-4b83-9d37-77ba7bbb808f" (UID: "3a052e21-d0ff-4b83-9d37-77ba7bbb808f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.962843 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-kube-api-access-9hzsk" (OuterVolumeSpecName: "kube-api-access-9hzsk") pod "3a052e21-d0ff-4b83-9d37-77ba7bbb808f" (UID: "3a052e21-d0ff-4b83-9d37-77ba7bbb808f"). InnerVolumeSpecName "kube-api-access-9hzsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.965747 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8703a574-800c-4f20-90d3-54027deb24a5-kube-api-access-2dnqr" (OuterVolumeSpecName: "kube-api-access-2dnqr") pod "8703a574-800c-4f20-90d3-54027deb24a5" (UID: "8703a574-800c-4f20-90d3-54027deb24a5"). InnerVolumeSpecName "kube-api-access-2dnqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.994569 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d722581-23f4-4525-819c-6670d7990f2f" path="/var/lib/kubelet/pods/8d722581-23f4-4525-819c-6670d7990f2f/volumes" Jan 29 13:35:43 crc kubenswrapper[4787]: I0129 13:35:43.995011 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acec1ff8-f4ae-4575-ba00-ff8c2ee48005" path="/var/lib/kubelet/pods/acec1ff8-f4ae-4575-ba00-ff8c2ee48005/volumes" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.057559 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.057586 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.057596 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.057606 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dnqr\" (UniqueName: \"kubernetes.io/projected/8703a574-800c-4f20-90d3-54027deb24a5-kube-api-access-2dnqr\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.057615 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hzsk\" (UniqueName: \"kubernetes.io/projected/3a052e21-d0ff-4b83-9d37-77ba7bbb808f-kube-api-access-9hzsk\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.057624 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8703a574-800c-4f20-90d3-54027deb24a5-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.362435 4787 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wpb6r" event={"ID":"1a18d2e9-35be-4d8f-9d13-08296cfa2963","Type":"ContainerStarted","Data":"44a951af89e5870f79973e69b646b34901416c5ba1ac974713e2e57603818127"} Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.363786 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"baf39877-8374-4f5a-91a6-60b55b5d6514","Type":"ContainerStarted","Data":"3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9"} Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.363890 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.365830 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz" event={"ID":"8703a574-800c-4f20-90d3-54027deb24a5","Type":"ContainerDied","Data":"4b57774421436b3a0d1241ea0d8ad4d15312e93c1bd10b4539a7922f3f247a13"} Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.365878 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-vjdqz" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.367106 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-qg99s" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.367110 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-qg99s" event={"ID":"3a052e21-d0ff-4b83-9d37-77ba7bbb808f","Type":"ContainerDied","Data":"6100a57bb8ed9d69cedff9dde82722d262dddcfd349276175b370fda19884b40"} Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.368622 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-mnv6q" event={"ID":"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f","Type":"ContainerStarted","Data":"83092091b62a90e06f7ad1fd69da22e717445e17ad2fd1ff25db0e3211bc8e2d"} Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.369920 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" event={"ID":"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16","Type":"ContainerStarted","Data":"480bf948802b065e810f1873e0dd247c961894a089287a02d7f759659cf6424d"} Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.396391 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=14.100916657 podStartE2EDuration="30.396378929s" podCreationTimestamp="2026-01-29 13:35:14 +0000 UTC" firstStartedPulling="2026-01-29 13:35:27.511238978 +0000 UTC m=+1166.272499274" lastFinishedPulling="2026-01-29 13:35:43.80670127 +0000 UTC m=+1182.567961546" observedRunningTime="2026-01-29 13:35:44.391515627 +0000 UTC m=+1183.152775903" watchObservedRunningTime="2026-01-29 13:35:44.396378929 +0000 UTC m=+1183.157639205" Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.425475 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-vjdqz"] Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.427047 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-vjdqz"] Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.447512 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-qg99s"] Jan 29 13:35:44 crc kubenswrapper[4787]: I0129 13:35:44.452730 4787 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-qg99s"] Jan 29 13:35:45 crc kubenswrapper[4787]: I0129 13:35:45.395710 4787 generic.go:334] "Generic (PLEG): container finished" podID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerID="31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1" exitCode=0 Jan 29 13:35:45 crc kubenswrapper[4787]: I0129 13:35:45.395828 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"bf2e6240-bb2e-45fc-b33f-3b54a718f136","Type":"ContainerDied","Data":"31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1"} Jan 29 13:35:46 crc kubenswrapper[4787]: I0129 13:35:46.001362 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a052e21-d0ff-4b83-9d37-77ba7bbb808f" path="/var/lib/kubelet/pods/3a052e21-d0ff-4b83-9d37-77ba7bbb808f/volumes" Jan 29 13:35:46 crc kubenswrapper[4787]: I0129 13:35:46.002228 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8703a574-800c-4f20-90d3-54027deb24a5" path="/var/lib/kubelet/pods/8703a574-800c-4f20-90d3-54027deb24a5/volumes" Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.416433 4787 generic.go:334] "Generic (PLEG): container finished" podID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerID="bfcbe8d49851c9ea391b6ad1eed625616efe4028f3faea7bd0158cc4417c4ab3" exitCode=0 Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.416581 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" event={"ID":"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16","Type":"ContainerDied","Data":"bfcbe8d49851c9ea391b6ad1eed625616efe4028f3faea7bd0158cc4417c4ab3"} Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.418662 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e91c97aa-9ab9-47e6-9821-22ee20dff312","Type":"ContainerStarted","Data":"76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80"} Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.424800 4787 generic.go:334] "Generic (PLEG): container finished" podID="7b3f405a-2fa1-4afe-8364-60489fc271ca" containerID="9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275" exitCode=0 Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.424882 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b3f405a-2fa1-4afe-8364-60489fc271ca","Type":"ContainerDied","Data":"9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275"} Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.427696 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"09f4aba5-9fa2-4e2d-ac39-e62905543d84","Type":"ContainerStarted","Data":"043d0ca5869b624c5c827973b5831c73dd8054e384abffecf0ed9cf48cb278f0"} Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.437080 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"bf2e6240-bb2e-45fc-b33f-3b54a718f136","Type":"ContainerStarted","Data":"b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8"} Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.439162 4787 generic.go:334] "Generic (PLEG): container finished" podID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerID="1233336481d0ba05ac8394b5e90213caf24b9a059da5f8a98c48bb5d1c70ff2b" exitCode=0 Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.439222 4787 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/ovn-controller-ovs-2xr6j" event={"ID":"213bfa86-f7a6-48b4-94a0-328352f00e75","Type":"ContainerDied","Data":"1233336481d0ba05ac8394b5e90213caf24b9a059da5f8a98c48bb5d1c70ff2b"} Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.447565 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf" event={"ID":"383ed8f7-22dd-49b6-a932-6425cc62a6d1","Type":"ContainerStarted","Data":"017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530"} Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.447723 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-hz6gf" Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.448979 4787 generic.go:334] "Generic (PLEG): container finished" podID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" containerID="6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8" exitCode=0 Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.449008 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-mnv6q" event={"ID":"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f","Type":"ContainerDied","Data":"6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8"} Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.495602 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=14.113099268 podStartE2EDuration="36.495583217s" podCreationTimestamp="2026-01-29 13:35:11 +0000 UTC" firstStartedPulling="2026-01-29 13:35:18.02072732 +0000 UTC m=+1156.781987616" lastFinishedPulling="2026-01-29 13:35:40.403211269 +0000 UTC m=+1179.164471565" observedRunningTime="2026-01-29 13:35:47.471810989 +0000 UTC m=+1186.233071285" watchObservedRunningTime="2026-01-29 13:35:47.495583217 +0000 UTC m=+1186.256843493" Jan 29 13:35:47 crc kubenswrapper[4787]: I0129 13:35:47.556986 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-hz6gf" podStartSLOduration=23.941181825 podStartE2EDuration="29.556966754s" podCreationTimestamp="2026-01-29 13:35:18 +0000 UTC" firstStartedPulling="2026-01-29 13:35:40.750399642 +0000 UTC m=+1179.511659918" lastFinishedPulling="2026-01-29 13:35:46.366184531 +0000 UTC m=+1185.127444847" observedRunningTime="2026-01-29 13:35:47.554505957 +0000 UTC m=+1186.315766223" watchObservedRunningTime="2026-01-29 13:35:47.556966754 +0000 UTC m=+1186.318227030" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.457748 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wpb6r" event={"ID":"1a18d2e9-35be-4d8f-9d13-08296cfa2963","Type":"ContainerStarted","Data":"da4713d4a7d29b71f50eb1206c38223a024fe96ef15d57aa224e3edbf3ee1b4e"} Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.460678 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e91c97aa-9ab9-47e6-9821-22ee20dff312","Type":"ContainerStarted","Data":"dc37f2e0d9cdd587ea0cfeec9b06226a2300a64f38c57be961c897d05d7498a1"} Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.463504 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b3f405a-2fa1-4afe-8364-60489fc271ca","Type":"ContainerStarted","Data":"a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72"} Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.465587 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovsdbserver-nb-0" event={"ID":"09f4aba5-9fa2-4e2d-ac39-e62905543d84","Type":"ContainerStarted","Data":"898e2a89c97b0d73e3b5a788305880e3b3f59cb25679762400e781c5389d9cd4"} Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.468236 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2xr6j" event={"ID":"213bfa86-f7a6-48b4-94a0-328352f00e75","Type":"ContainerStarted","Data":"58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce"} Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.468262 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2xr6j" event={"ID":"213bfa86-f7a6-48b4-94a0-328352f00e75","Type":"ContainerStarted","Data":"eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22"} Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.468618 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.468709 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.470886 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-mnv6q" event={"ID":"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f","Type":"ContainerStarted","Data":"79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186"} Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.471631 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.473905 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" event={"ID":"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16","Type":"ContainerStarted","Data":"c3dea469118180458bfbe46e770e1fb9c15e0b551961c69cd7dcd6c6ea961cd2"} Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.474019 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.483058 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-wpb6r" podStartSLOduration=3.568885691 podStartE2EDuration="7.483024007s" podCreationTimestamp="2026-01-29 13:35:41 +0000 UTC" firstStartedPulling="2026-01-29 13:35:43.711761339 +0000 UTC m=+1182.473021615" lastFinishedPulling="2026-01-29 13:35:47.625899655 +0000 UTC m=+1186.387159931" observedRunningTime="2026-01-29 13:35:48.477816317 +0000 UTC m=+1187.239076593" watchObservedRunningTime="2026-01-29 13:35:48.483024007 +0000 UTC m=+1187.244284283" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.504064 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=22.252435859 podStartE2EDuration="28.504042482s" podCreationTimestamp="2026-01-29 13:35:20 +0000 UTC" firstStartedPulling="2026-01-29 13:35:41.377729531 +0000 UTC m=+1180.138989807" lastFinishedPulling="2026-01-29 13:35:47.629336154 +0000 UTC m=+1186.390596430" observedRunningTime="2026-01-29 13:35:48.496127469 +0000 UTC m=+1187.257387735" watchObservedRunningTime="2026-01-29 13:35:48.504042482 +0000 UTC m=+1187.265302748" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.534654 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-7878659675-mnv6q" podStartSLOduration=4.8724173650000004 podStartE2EDuration="7.534633918s" podCreationTimestamp="2026-01-29 13:35:41 +0000 UTC" firstStartedPulling="2026-01-29 13:35:43.709413974 +0000 UTC m=+1182.470674250" lastFinishedPulling="2026-01-29 13:35:46.371630507 +0000 UTC m=+1185.132890803" observedRunningTime="2026-01-29 13:35:48.529777376 +0000 UTC m=+1187.291037672" watchObservedRunningTime="2026-01-29 13:35:48.534633918 +0000 UTC m=+1187.295894194" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.572430 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-2xr6j" podStartSLOduration=25.275235665 podStartE2EDuration="30.57240988s" podCreationTimestamp="2026-01-29 13:35:18 +0000 UTC" firstStartedPulling="2026-01-29 13:35:41.032593585 +0000 UTC m=+1179.793853861" lastFinishedPulling="2026-01-29 13:35:46.3297678 +0000 UTC m=+1185.091028076" observedRunningTime="2026-01-29 13:35:48.56723036 +0000 UTC m=+1187.328490646" watchObservedRunningTime="2026-01-29 13:35:48.57240988 +0000 UTC m=+1187.333670166" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.586925 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" podStartSLOduration=4.927317113 podStartE2EDuration="7.586902694s" podCreationTimestamp="2026-01-29 13:35:41 +0000 UTC" firstStartedPulling="2026-01-29 13:35:43.709302792 +0000 UTC m=+1182.470563068" lastFinishedPulling="2026-01-29 13:35:46.368888363 +0000 UTC m=+1185.130148649" observedRunningTime="2026-01-29 13:35:48.549595893 +0000 UTC m=+1187.310856169" watchObservedRunningTime="2026-01-29 13:35:48.586902694 +0000 UTC m=+1187.348162980" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.595887 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371998.258913 podStartE2EDuration="38.595862861s" podCreationTimestamp="2026-01-29 13:35:10 +0000 UTC" firstStartedPulling="2026-01-29 13:35:12.213126474 +0000 UTC m=+1150.974386760" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:35:48.593954717 +0000 UTC m=+1187.355214993" watchObservedRunningTime="2026-01-29 13:35:48.595862861 +0000 UTC m=+1187.357123137" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.616042 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=23.384321613 podStartE2EDuration="30.616023767s" podCreationTimestamp="2026-01-29 13:35:18 +0000 UTC" firstStartedPulling="2026-01-29 13:35:40.396838132 +0000 UTC m=+1179.158098418" lastFinishedPulling="2026-01-29 13:35:47.628540296 +0000 UTC m=+1186.389800572" observedRunningTime="2026-01-29 13:35:48.611654756 +0000 UTC m=+1187.372915052" watchObservedRunningTime="2026-01-29 13:35:48.616023767 +0000 UTC m=+1187.377284033" Jan 29 13:35:48 crc kubenswrapper[4787]: I0129 13:35:48.894873 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:49 crc kubenswrapper[4787]: I0129 13:35:49.514128 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:49 crc kubenswrapper[4787]: I0129 13:35:49.514173 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:49 crc kubenswrapper[4787]: I0129 13:35:49.588881 4787 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:49 crc kubenswrapper[4787]: E0129 13:35:49.999699 4787 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.203:34498->38.102.83.203:43259: write tcp 38.102.83.203:34498->38.102.83.203:43259: write: broken pipe Jan 29 13:35:51 crc kubenswrapper[4787]: I0129 13:35:51.553479 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 29 13:35:51 crc kubenswrapper[4787]: I0129 13:35:51.553870 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 29 13:35:51 crc kubenswrapper[4787]: I0129 13:35:51.565730 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 29 13:35:51 crc kubenswrapper[4787]: I0129 13:35:51.894922 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:51 crc kubenswrapper[4787]: I0129 13:35:51.934383 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.157670 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.412649 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.457628 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7878659675-mnv6q"] Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.507889 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7878659675-mnv6q" podUID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" containerName="dnsmasq-dns" containerID="cri-o://79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186" gracePeriod=10 Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.543834 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.707073 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.709433 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.713071 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.713330 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.713555 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.717307 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-jfgqb" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.722084 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.822228 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.822312 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.822373 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-config\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.822395 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-scripts\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.822421 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.822500 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nwwz\" (UniqueName: \"kubernetes.io/projected/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-kube-api-access-8nwwz\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.822545 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: 
I0129 13:35:52.923506 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nwwz\" (UniqueName: \"kubernetes.io/projected/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-kube-api-access-8nwwz\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.923612 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.923670 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.923757 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.923818 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-config\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.923836 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-scripts\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.923853 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.924485 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.924961 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-config\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.925116 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-scripts\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.932998 4787 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.942143 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.946764 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:52 crc kubenswrapper[4787]: I0129 13:35:52.947698 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nwwz\" (UniqueName: \"kubernetes.io/projected/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-kube-api-access-8nwwz\") pod \"ovn-northd-0\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") " pod="openstack/ovn-northd-0" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.035199 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.039220 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.050230 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.050267 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.128130 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-ovsdbserver-nb\") pod \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.128222 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-dns-svc\") pod \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.128322 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9dpn\" (UniqueName: \"kubernetes.io/projected/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-kube-api-access-n9dpn\") pod \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.128442 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-config\") pod \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\" (UID: \"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f\") " Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.143758 4787 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-kube-api-access-n9dpn" (OuterVolumeSpecName: "kube-api-access-n9dpn") pod "dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" (UID: "dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f"). InnerVolumeSpecName "kube-api-access-n9dpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.178051 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" (UID: "dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.178664 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" (UID: "dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.183152 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-config" (OuterVolumeSpecName: "config") pod "dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" (UID: "dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.218075 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.230644 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9dpn\" (UniqueName: \"kubernetes.io/projected/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-kube-api-access-n9dpn\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.230668 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.230677 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.230685 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.517177 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.523348 4787 generic.go:334] "Generic (PLEG): container finished" podID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" containerID="79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186" exitCode=0 Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.524116 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-mnv6q" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.525986 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-mnv6q" event={"ID":"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f","Type":"ContainerDied","Data":"79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186"} Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.526012 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-mnv6q" event={"ID":"dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f","Type":"ContainerDied","Data":"83092091b62a90e06f7ad1fd69da22e717445e17ad2fd1ff25db0e3211bc8e2d"} Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.526029 4787 scope.go:117] "RemoveContainer" containerID="79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186" Jan 29 13:35:53 crc kubenswrapper[4787]: W0129 13:35:53.529852 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcdeb3ae9_0105_40e4_889d_7d9ab0be4427.slice/crio-bb8343c68390c1419fd28be0f24f8130ffd16f339fefc6537ba09f0aebc484bc WatchSource:0}: Error finding container bb8343c68390c1419fd28be0f24f8130ffd16f339fefc6537ba09f0aebc484bc: Status 404 returned error can't find the container with id bb8343c68390c1419fd28be0f24f8130ffd16f339fefc6537ba09f0aebc484bc Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.551544 4787 scope.go:117] "RemoveContainer" containerID="6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.595986 4787 scope.go:117] "RemoveContainer" containerID="79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186" Jan 29 13:35:53 crc kubenswrapper[4787]: E0129 13:35:53.596802 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186\": container with ID starting with 79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186 not found: ID does not exist" containerID="79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.596839 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186"} err="failed to get container status \"79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186\": rpc error: code = NotFound desc = could not find container \"79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186\": container with ID starting with 79e4217e7b1328aacee6db7c400f16679b556a83df3bb2b938e24645a0108186 not found: ID does not exist" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.596861 4787 scope.go:117] "RemoveContainer" containerID="6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8" Jan 29 13:35:53 crc kubenswrapper[4787]: E0129 13:35:53.597679 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8\": container with ID starting with 6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8 not found: ID does not exist" containerID="6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.597715 4787 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8"} err="failed to get container status \"6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8\": rpc error: code = NotFound desc = could not find container \"6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8\": container with ID starting with 6ca6c43953487c257e9fdac125efde822e519d34963c2b01da62a8cff3fcd6f8 not found: ID does not exist" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.598954 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7878659675-mnv6q"] Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.607795 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7878659675-mnv6q"] Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.636253 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 29 13:35:53 crc kubenswrapper[4787]: I0129 13:35:53.993878 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" path="/var/lib/kubelet/pods/dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f/volumes" Jan 29 13:35:54 crc kubenswrapper[4787]: I0129 13:35:54.045013 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 29 13:35:54 crc kubenswrapper[4787]: I0129 13:35:54.131736 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 29 13:35:54 crc kubenswrapper[4787]: I0129 13:35:54.531956 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cdeb3ae9-0105-40e4-889d-7d9ab0be4427","Type":"ContainerStarted","Data":"bb8343c68390c1419fd28be0f24f8130ffd16f339fefc6537ba09f0aebc484bc"} Jan 29 13:35:54 crc kubenswrapper[4787]: I0129 13:35:54.533624 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d95df36d-a737-4136-8921-01fe4e028add","Type":"ContainerStarted","Data":"2e159ae76f0bb63f2124cb8a5615db9a9eac4b38c6806f2dcf9a137b01700373"} Jan 29 13:35:54 crc kubenswrapper[4787]: I0129 13:35:54.561526 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=7.106047972 podStartE2EDuration="42.561509625s" podCreationTimestamp="2026-01-29 13:35:12 +0000 UTC" firstStartedPulling="2026-01-29 13:35:18.010546598 +0000 UTC m=+1156.771806894" lastFinishedPulling="2026-01-29 13:35:53.466008271 +0000 UTC m=+1192.227268547" observedRunningTime="2026-01-29 13:35:54.560909131 +0000 UTC m=+1193.322169407" watchObservedRunningTime="2026-01-29 13:35:54.561509625 +0000 UTC m=+1193.322769891" Jan 29 13:35:54 crc kubenswrapper[4787]: I0129 13:35:54.893634 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 29 13:35:55 crc kubenswrapper[4787]: I0129 13:35:55.543040 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cdeb3ae9-0105-40e4-889d-7d9ab0be4427","Type":"ContainerStarted","Data":"453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b"} Jan 29 13:35:55 crc kubenswrapper[4787]: I0129 13:35:55.543373 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"cdeb3ae9-0105-40e4-889d-7d9ab0be4427","Type":"ContainerStarted","Data":"f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0"} Jan 29 13:35:55 crc kubenswrapper[4787]: I0129 13:35:55.543414 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 29 13:35:55 crc kubenswrapper[4787]: I0129 13:35:55.587562 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.262501683 podStartE2EDuration="3.587497154s" podCreationTimestamp="2026-01-29 13:35:52 +0000 UTC" firstStartedPulling="2026-01-29 13:35:53.53270326 +0000 UTC m=+1192.293963536" lastFinishedPulling="2026-01-29 13:35:54.857698731 +0000 UTC m=+1193.618959007" observedRunningTime="2026-01-29 13:35:55.573316557 +0000 UTC m=+1194.334576873" watchObservedRunningTime="2026-01-29 13:35:55.587497154 +0000 UTC m=+1194.348757470" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.321736 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.325053 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.662715 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-gcwmx"] Jan 29 13:35:58 crc kubenswrapper[4787]: E0129 13:35:58.663244 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" containerName="init" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.663286 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" containerName="init" Jan 29 13:35:58 crc kubenswrapper[4787]: E0129 13:35:58.663335 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" containerName="dnsmasq-dns" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.663348 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" containerName="dnsmasq-dns" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.663748 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc65f6c6-f0f2-4ab0-abbf-32dda7f62e9f" containerName="dnsmasq-dns" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.664636 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gcwmx" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.671585 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-e093-account-create-update-v7drb"] Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.672610 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.674711 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.687501 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-gcwmx"] Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.700441 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-e093-account-create-update-v7drb"] Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.767575 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7a32d77-75e8-4777-9ace-f29730eb8e4d-operator-scripts\") pod \"glance-db-create-gcwmx\" (UID: \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\") " pod="openstack/glance-db-create-gcwmx" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.767958 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a095df-6c58-487f-aa30-deca9dc23d47-operator-scripts\") pod \"glance-e093-account-create-update-v7drb\" (UID: \"90a095df-6c58-487f-aa30-deca9dc23d47\") " pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.768244 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqwc8\" (UniqueName: \"kubernetes.io/projected/90a095df-6c58-487f-aa30-deca9dc23d47-kube-api-access-hqwc8\") pod \"glance-e093-account-create-update-v7drb\" (UID: \"90a095df-6c58-487f-aa30-deca9dc23d47\") " pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.768554 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgj6k\" (UniqueName: \"kubernetes.io/projected/a7a32d77-75e8-4777-9ace-f29730eb8e4d-kube-api-access-hgj6k\") pod \"glance-db-create-gcwmx\" (UID: \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\") " pod="openstack/glance-db-create-gcwmx" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.870066 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgj6k\" (UniqueName: \"kubernetes.io/projected/a7a32d77-75e8-4777-9ace-f29730eb8e4d-kube-api-access-hgj6k\") pod \"glance-db-create-gcwmx\" (UID: \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\") " pod="openstack/glance-db-create-gcwmx" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.870151 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7a32d77-75e8-4777-9ace-f29730eb8e4d-operator-scripts\") pod \"glance-db-create-gcwmx\" (UID: \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\") " pod="openstack/glance-db-create-gcwmx" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.870179 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a095df-6c58-487f-aa30-deca9dc23d47-operator-scripts\") pod \"glance-e093-account-create-update-v7drb\" (UID: \"90a095df-6c58-487f-aa30-deca9dc23d47\") " pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.870261 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hqwc8\" (UniqueName: \"kubernetes.io/projected/90a095df-6c58-487f-aa30-deca9dc23d47-kube-api-access-hqwc8\") pod \"glance-e093-account-create-update-v7drb\" (UID: \"90a095df-6c58-487f-aa30-deca9dc23d47\") " pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.872142 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a095df-6c58-487f-aa30-deca9dc23d47-operator-scripts\") pod \"glance-e093-account-create-update-v7drb\" (UID: \"90a095df-6c58-487f-aa30-deca9dc23d47\") " pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.873481 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7a32d77-75e8-4777-9ace-f29730eb8e4d-operator-scripts\") pod \"glance-db-create-gcwmx\" (UID: \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\") " pod="openstack/glance-db-create-gcwmx" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.891034 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqwc8\" (UniqueName: \"kubernetes.io/projected/90a095df-6c58-487f-aa30-deca9dc23d47-kube-api-access-hqwc8\") pod \"glance-e093-account-create-update-v7drb\" (UID: \"90a095df-6c58-487f-aa30-deca9dc23d47\") " pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.893032 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgj6k\" (UniqueName: \"kubernetes.io/projected/a7a32d77-75e8-4777-9ace-f29730eb8e4d-kube-api-access-hgj6k\") pod \"glance-db-create-gcwmx\" (UID: \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\") " pod="openstack/glance-db-create-gcwmx" Jan 29 13:35:58 crc kubenswrapper[4787]: I0129 13:35:58.994906 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gcwmx" Jan 29 13:35:59 crc kubenswrapper[4787]: I0129 13:35:59.011037 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:35:59 crc kubenswrapper[4787]: I0129 13:35:59.490426 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-gcwmx"] Jan 29 13:35:59 crc kubenswrapper[4787]: I0129 13:35:59.499834 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-e093-account-create-update-v7drb"] Jan 29 13:35:59 crc kubenswrapper[4787]: I0129 13:35:59.586925 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gcwmx" event={"ID":"a7a32d77-75e8-4777-9ace-f29730eb8e4d","Type":"ContainerStarted","Data":"be1eee0543255a45fe2e08e07d36b8cd8399c5c0a2fe5a2e9487cc943103321f"} Jan 29 13:35:59 crc kubenswrapper[4787]: I0129 13:35:59.588973 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e093-account-create-update-v7drb" event={"ID":"90a095df-6c58-487f-aa30-deca9dc23d47","Type":"ContainerStarted","Data":"8985f0c3d9c430d54fcfde99e32679d1fdc0b29a858ebd073374ed39a4d597b6"} Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.214894 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-tjgpm"] Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.215823 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.218291 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.222719 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-tjgpm"] Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.291011 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5bcl\" (UniqueName: \"kubernetes.io/projected/246a790d-434e-4efe-aea0-a147051b15c4-kube-api-access-b5bcl\") pod \"root-account-create-update-tjgpm\" (UID: \"246a790d-434e-4efe-aea0-a147051b15c4\") " pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.291103 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/246a790d-434e-4efe-aea0-a147051b15c4-operator-scripts\") pod \"root-account-create-update-tjgpm\" (UID: \"246a790d-434e-4efe-aea0-a147051b15c4\") " pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.392892 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5bcl\" (UniqueName: \"kubernetes.io/projected/246a790d-434e-4efe-aea0-a147051b15c4-kube-api-access-b5bcl\") pod \"root-account-create-update-tjgpm\" (UID: \"246a790d-434e-4efe-aea0-a147051b15c4\") " pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.393323 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/246a790d-434e-4efe-aea0-a147051b15c4-operator-scripts\") pod \"root-account-create-update-tjgpm\" (UID: \"246a790d-434e-4efe-aea0-a147051b15c4\") " pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.394075 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/246a790d-434e-4efe-aea0-a147051b15c4-operator-scripts\") pod \"root-account-create-update-tjgpm\" (UID: \"246a790d-434e-4efe-aea0-a147051b15c4\") " pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.419183 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5bcl\" (UniqueName: \"kubernetes.io/projected/246a790d-434e-4efe-aea0-a147051b15c4-kube-api-access-b5bcl\") pod \"root-account-create-update-tjgpm\" (UID: \"246a790d-434e-4efe-aea0-a147051b15c4\") " pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.568975 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.603402 4787 generic.go:334] "Generic (PLEG): container finished" podID="a7a32d77-75e8-4777-9ace-f29730eb8e4d" containerID="393993cd1c1f3ad4ab9f870f012e6c9668994eba30a91b7a9ee18082dd2d2fe0" exitCode=0 Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.603639 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gcwmx" event={"ID":"a7a32d77-75e8-4777-9ace-f29730eb8e4d","Type":"ContainerDied","Data":"393993cd1c1f3ad4ab9f870f012e6c9668994eba30a91b7a9ee18082dd2d2fe0"} Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.606012 4787 generic.go:334] "Generic (PLEG): container finished" podID="90a095df-6c58-487f-aa30-deca9dc23d47" containerID="c3fa4b959b699ba45eacce41c7659658e2e9f1bdb045347aa100e24db697944e" exitCode=0 Jan 29 13:36:00 crc kubenswrapper[4787]: I0129 13:36:00.606065 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e093-account-create-update-v7drb" event={"ID":"90a095df-6c58-487f-aa30-deca9dc23d47","Type":"ContainerDied","Data":"c3fa4b959b699ba45eacce41c7659658e2e9f1bdb045347aa100e24db697944e"} Jan 29 13:36:01 crc kubenswrapper[4787]: I0129 13:36:01.029922 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-tjgpm"] Jan 29 13:36:01 crc kubenswrapper[4787]: W0129 13:36:01.038556 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod246a790d_434e_4efe_aea0_a147051b15c4.slice/crio-c98f321571faecff15b5fe64a8744ad30b501506f9bed9ef1069bd3326868b85 WatchSource:0}: Error finding container c98f321571faecff15b5fe64a8744ad30b501506f9bed9ef1069bd3326868b85: Status 404 returned error can't find the container with id c98f321571faecff15b5fe64a8744ad30b501506f9bed9ef1069bd3326868b85 Jan 29 13:36:01 crc kubenswrapper[4787]: I0129 13:36:01.616050 4787 generic.go:334] "Generic (PLEG): container finished" podID="246a790d-434e-4efe-aea0-a147051b15c4" containerID="7de70d403eec1d4bc647f68435fe451159d0ea3927072482318db17e1345560d" exitCode=0 Jan 29 13:36:01 crc kubenswrapper[4787]: I0129 13:36:01.616097 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-tjgpm" event={"ID":"246a790d-434e-4efe-aea0-a147051b15c4","Type":"ContainerDied","Data":"7de70d403eec1d4bc647f68435fe451159d0ea3927072482318db17e1345560d"} Jan 29 13:36:01 crc kubenswrapper[4787]: I0129 13:36:01.616378 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-tjgpm" event={"ID":"246a790d-434e-4efe-aea0-a147051b15c4","Type":"ContainerStarted","Data":"c98f321571faecff15b5fe64a8744ad30b501506f9bed9ef1069bd3326868b85"} Jan 29 13:36:01 crc kubenswrapper[4787]: I0129 13:36:01.949856 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-gcwmx" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.021888 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgj6k\" (UniqueName: \"kubernetes.io/projected/a7a32d77-75e8-4777-9ace-f29730eb8e4d-kube-api-access-hgj6k\") pod \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\" (UID: \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\") " Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.024825 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7a32d77-75e8-4777-9ace-f29730eb8e4d-operator-scripts\") pod \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\" (UID: \"a7a32d77-75e8-4777-9ace-f29730eb8e4d\") " Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.026075 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7a32d77-75e8-4777-9ace-f29730eb8e4d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a7a32d77-75e8-4777-9ace-f29730eb8e4d" (UID: "a7a32d77-75e8-4777-9ace-f29730eb8e4d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.027661 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.030648 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7a32d77-75e8-4777-9ace-f29730eb8e4d-kube-api-access-hgj6k" (OuterVolumeSpecName: "kube-api-access-hgj6k") pod "a7a32d77-75e8-4777-9ace-f29730eb8e4d" (UID: "a7a32d77-75e8-4777-9ace-f29730eb8e4d"). InnerVolumeSpecName "kube-api-access-hgj6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.126527 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqwc8\" (UniqueName: \"kubernetes.io/projected/90a095df-6c58-487f-aa30-deca9dc23d47-kube-api-access-hqwc8\") pod \"90a095df-6c58-487f-aa30-deca9dc23d47\" (UID: \"90a095df-6c58-487f-aa30-deca9dc23d47\") " Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.126564 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a095df-6c58-487f-aa30-deca9dc23d47-operator-scripts\") pod \"90a095df-6c58-487f-aa30-deca9dc23d47\" (UID: \"90a095df-6c58-487f-aa30-deca9dc23d47\") " Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.126839 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7a32d77-75e8-4777-9ace-f29730eb8e4d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.126863 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgj6k\" (UniqueName: \"kubernetes.io/projected/a7a32d77-75e8-4777-9ace-f29730eb8e4d-kube-api-access-hgj6k\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.127062 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/90a095df-6c58-487f-aa30-deca9dc23d47-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "90a095df-6c58-487f-aa30-deca9dc23d47" (UID: "90a095df-6c58-487f-aa30-deca9dc23d47"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.129241 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90a095df-6c58-487f-aa30-deca9dc23d47-kube-api-access-hqwc8" (OuterVolumeSpecName: "kube-api-access-hqwc8") pod "90a095df-6c58-487f-aa30-deca9dc23d47" (UID: "90a095df-6c58-487f-aa30-deca9dc23d47"). InnerVolumeSpecName "kube-api-access-hqwc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.232425 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqwc8\" (UniqueName: \"kubernetes.io/projected/90a095df-6c58-487f-aa30-deca9dc23d47-kube-api-access-hqwc8\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.232481 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/90a095df-6c58-487f-aa30-deca9dc23d47-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.627012 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gcwmx" event={"ID":"a7a32d77-75e8-4777-9ace-f29730eb8e4d","Type":"ContainerDied","Data":"be1eee0543255a45fe2e08e07d36b8cd8399c5c0a2fe5a2e9487cc943103321f"} Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.627435 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be1eee0543255a45fe2e08e07d36b8cd8399c5c0a2fe5a2e9487cc943103321f" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.627601 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gcwmx" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.647330 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-e093-account-create-update-v7drb" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.647485 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e093-account-create-update-v7drb" event={"ID":"90a095df-6c58-487f-aa30-deca9dc23d47","Type":"ContainerDied","Data":"8985f0c3d9c430d54fcfde99e32679d1fdc0b29a858ebd073374ed39a4d597b6"} Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.647531 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8985f0c3d9c430d54fcfde99e32679d1fdc0b29a858ebd073374ed39a4d597b6" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.871399 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-k92qs"] Jan 29 13:36:02 crc kubenswrapper[4787]: E0129 13:36:02.871822 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7a32d77-75e8-4777-9ace-f29730eb8e4d" containerName="mariadb-database-create" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.871844 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7a32d77-75e8-4777-9ace-f29730eb8e4d" containerName="mariadb-database-create" Jan 29 13:36:02 crc kubenswrapper[4787]: E0129 13:36:02.871875 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90a095df-6c58-487f-aa30-deca9dc23d47" containerName="mariadb-account-create-update" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.871882 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="90a095df-6c58-487f-aa30-deca9dc23d47" containerName="mariadb-account-create-update" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.872051 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7a32d77-75e8-4777-9ace-f29730eb8e4d" containerName="mariadb-database-create" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.872067 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="90a095df-6c58-487f-aa30-deca9dc23d47" containerName="mariadb-account-create-update" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.872541 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.898307 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-k92qs"] Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.953588 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/499a4ece-1afc-472d-9f39-76f56d1c8681-operator-scripts\") pod \"keystone-db-create-k92qs\" (UID: \"499a4ece-1afc-472d-9f39-76f56d1c8681\") " pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.953641 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gn8h\" (UniqueName: \"kubernetes.io/projected/499a4ece-1afc-472d-9f39-76f56d1c8681-kube-api-access-2gn8h\") pod \"keystone-db-create-k92qs\" (UID: \"499a4ece-1afc-472d-9f39-76f56d1c8681\") " pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.969157 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-dee3-account-create-update-zj75j"] Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.970402 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.973808 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 29 13:36:02 crc kubenswrapper[4787]: I0129 13:36:02.979079 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dee3-account-create-update-zj75j"] Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.022751 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.055416 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/499a4ece-1afc-472d-9f39-76f56d1c8681-operator-scripts\") pod \"keystone-db-create-k92qs\" (UID: \"499a4ece-1afc-472d-9f39-76f56d1c8681\") " pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.055467 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gn8h\" (UniqueName: \"kubernetes.io/projected/499a4ece-1afc-472d-9f39-76f56d1c8681-kube-api-access-2gn8h\") pod \"keystone-db-create-k92qs\" (UID: \"499a4ece-1afc-472d-9f39-76f56d1c8681\") " pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.055572 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/391a21f6-7c95-41ff-9197-9ed01d35e73b-operator-scripts\") pod \"keystone-dee3-account-create-update-zj75j\" (UID: \"391a21f6-7c95-41ff-9197-9ed01d35e73b\") " pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.055623 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wlc8\" (UniqueName: \"kubernetes.io/projected/391a21f6-7c95-41ff-9197-9ed01d35e73b-kube-api-access-2wlc8\") pod \"keystone-dee3-account-create-update-zj75j\" (UID: \"391a21f6-7c95-41ff-9197-9ed01d35e73b\") " pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.062918 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/499a4ece-1afc-472d-9f39-76f56d1c8681-operator-scripts\") pod \"keystone-db-create-k92qs\" (UID: \"499a4ece-1afc-472d-9f39-76f56d1c8681\") " pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.075110 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gn8h\" (UniqueName: \"kubernetes.io/projected/499a4ece-1afc-472d-9f39-76f56d1c8681-kube-api-access-2gn8h\") pod \"keystone-db-create-k92qs\" (UID: \"499a4ece-1afc-472d-9f39-76f56d1c8681\") " pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.157167 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5bcl\" (UniqueName: \"kubernetes.io/projected/246a790d-434e-4efe-aea0-a147051b15c4-kube-api-access-b5bcl\") pod \"246a790d-434e-4efe-aea0-a147051b15c4\" (UID: \"246a790d-434e-4efe-aea0-a147051b15c4\") " Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.157398 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/246a790d-434e-4efe-aea0-a147051b15c4-operator-scripts\") pod \"246a790d-434e-4efe-aea0-a147051b15c4\" (UID: \"246a790d-434e-4efe-aea0-a147051b15c4\") " Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.158252 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/391a21f6-7c95-41ff-9197-9ed01d35e73b-operator-scripts\") pod \"keystone-dee3-account-create-update-zj75j\" (UID: \"391a21f6-7c95-41ff-9197-9ed01d35e73b\") " pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.158355 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wlc8\" (UniqueName: \"kubernetes.io/projected/391a21f6-7c95-41ff-9197-9ed01d35e73b-kube-api-access-2wlc8\") pod \"keystone-dee3-account-create-update-zj75j\" (UID: \"391a21f6-7c95-41ff-9197-9ed01d35e73b\") " pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.158551 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/246a790d-434e-4efe-aea0-a147051b15c4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "246a790d-434e-4efe-aea0-a147051b15c4" (UID: "246a790d-434e-4efe-aea0-a147051b15c4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.159267 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/391a21f6-7c95-41ff-9197-9ed01d35e73b-operator-scripts\") pod \"keystone-dee3-account-create-update-zj75j\" (UID: \"391a21f6-7c95-41ff-9197-9ed01d35e73b\") " pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.167746 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/246a790d-434e-4efe-aea0-a147051b15c4-kube-api-access-b5bcl" (OuterVolumeSpecName: "kube-api-access-b5bcl") pod "246a790d-434e-4efe-aea0-a147051b15c4" (UID: "246a790d-434e-4efe-aea0-a147051b15c4"). InnerVolumeSpecName "kube-api-access-b5bcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.173978 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-7xwdd"] Jan 29 13:36:03 crc kubenswrapper[4787]: E0129 13:36:03.174293 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="246a790d-434e-4efe-aea0-a147051b15c4" containerName="mariadb-account-create-update" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.174309 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="246a790d-434e-4efe-aea0-a147051b15c4" containerName="mariadb-account-create-update" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.174450 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="246a790d-434e-4efe-aea0-a147051b15c4" containerName="mariadb-account-create-update" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.174930 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.189209 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-7xwdd"] Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.191058 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wlc8\" (UniqueName: \"kubernetes.io/projected/391a21f6-7c95-41ff-9197-9ed01d35e73b-kube-api-access-2wlc8\") pod \"keystone-dee3-account-create-update-zj75j\" (UID: \"391a21f6-7c95-41ff-9197-9ed01d35e73b\") " pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.193199 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.259896 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tljj2\" (UniqueName: \"kubernetes.io/projected/f93a192d-1d61-41b8-aef2-d8badc0cb9df-kube-api-access-tljj2\") pod \"placement-db-create-7xwdd\" (UID: \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\") " pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.260860 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f93a192d-1d61-41b8-aef2-d8badc0cb9df-operator-scripts\") pod \"placement-db-create-7xwdd\" (UID: \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\") " pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.260986 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5bcl\" (UniqueName: \"kubernetes.io/projected/246a790d-434e-4efe-aea0-a147051b15c4-kube-api-access-b5bcl\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.261002 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/246a790d-434e-4efe-aea0-a147051b15c4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.282576 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-e0f8-account-create-update-nqqt5"] Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.283706 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.286152 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.293790 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-e0f8-account-create-update-nqqt5"] Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.338086 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.367860 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpsbb\" (UniqueName: \"kubernetes.io/projected/f021b258-9578-4a25-af1f-2456434d0cda-kube-api-access-zpsbb\") pod \"placement-e0f8-account-create-update-nqqt5\" (UID: \"f021b258-9578-4a25-af1f-2456434d0cda\") " pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.368392 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f021b258-9578-4a25-af1f-2456434d0cda-operator-scripts\") pod \"placement-e0f8-account-create-update-nqqt5\" (UID: \"f021b258-9578-4a25-af1f-2456434d0cda\") " pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.368516 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tljj2\" (UniqueName: \"kubernetes.io/projected/f93a192d-1d61-41b8-aef2-d8badc0cb9df-kube-api-access-tljj2\") pod \"placement-db-create-7xwdd\" (UID: \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\") " pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.368570 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f93a192d-1d61-41b8-aef2-d8badc0cb9df-operator-scripts\") pod \"placement-db-create-7xwdd\" (UID: \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\") " pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.369541 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f93a192d-1d61-41b8-aef2-d8badc0cb9df-operator-scripts\") pod \"placement-db-create-7xwdd\" (UID: \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\") " pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.385064 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tljj2\" (UniqueName: \"kubernetes.io/projected/f93a192d-1d61-41b8-aef2-d8badc0cb9df-kube-api-access-tljj2\") pod \"placement-db-create-7xwdd\" (UID: \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\") " pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.471595 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f021b258-9578-4a25-af1f-2456434d0cda-operator-scripts\") pod \"placement-e0f8-account-create-update-nqqt5\" (UID: \"f021b258-9578-4a25-af1f-2456434d0cda\") " pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.471681 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpsbb\" (UniqueName: \"kubernetes.io/projected/f021b258-9578-4a25-af1f-2456434d0cda-kube-api-access-zpsbb\") pod \"placement-e0f8-account-create-update-nqqt5\" (UID: \"f021b258-9578-4a25-af1f-2456434d0cda\") " pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.472310 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/f021b258-9578-4a25-af1f-2456434d0cda-operator-scripts\") pod \"placement-e0f8-account-create-update-nqqt5\" (UID: \"f021b258-9578-4a25-af1f-2456434d0cda\") " pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.487049 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpsbb\" (UniqueName: \"kubernetes.io/projected/f021b258-9578-4a25-af1f-2456434d0cda-kube-api-access-zpsbb\") pod \"placement-e0f8-account-create-update-nqqt5\" (UID: \"f021b258-9578-4a25-af1f-2456434d0cda\") " pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.575840 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.611399 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.775578 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-tjgpm" event={"ID":"246a790d-434e-4efe-aea0-a147051b15c4","Type":"ContainerDied","Data":"c98f321571faecff15b5fe64a8744ad30b501506f9bed9ef1069bd3326868b85"} Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.775647 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c98f321571faecff15b5fe64a8744ad30b501506f9bed9ef1069bd3326868b85" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.775759 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-tjgpm" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.833513 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-k92qs"] Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.847530 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dee3-account-create-update-zj75j"] Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.859539 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-l5s5r"] Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.860538 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.862737 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.862979 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-tk4jm" Jan 29 13:36:03 crc kubenswrapper[4787]: I0129 13:36:03.865389 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-l5s5r"] Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.001355 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-config-data\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.001803 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-combined-ca-bundle\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.001977 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-db-sync-config-data\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.002186 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6sxm\" (UniqueName: \"kubernetes.io/projected/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-kube-api-access-h6sxm\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.107680 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-combined-ca-bundle\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.107774 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-db-sync-config-data\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.107890 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6sxm\" (UniqueName: \"kubernetes.io/projected/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-kube-api-access-h6sxm\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.108059 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-config-data\") pod 
\"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.123293 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-7xwdd"] Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.130043 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-combined-ca-bundle\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.131948 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-db-sync-config-data\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.133732 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-config-data\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.134389 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6sxm\" (UniqueName: \"kubernetes.io/projected/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-kube-api-access-h6sxm\") pod \"glance-db-sync-l5s5r\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: W0129 13:36:04.143153 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf93a192d_1d61_41b8_aef2_d8badc0cb9df.slice/crio-c1cf36472e68672d8d2748ccda565bde240dd8443d6100261721470ef9e0f18d WatchSource:0}: Error finding container c1cf36472e68672d8d2748ccda565bde240dd8443d6100261721470ef9e0f18d: Status 404 returned error can't find the container with id c1cf36472e68672d8d2748ccda565bde240dd8443d6100261721470ef9e0f18d Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.181239 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.198323 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-e0f8-account-create-update-nqqt5"] Jan 29 13:36:04 crc kubenswrapper[4787]: W0129 13:36:04.213799 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf021b258_9578_4a25_af1f_2456434d0cda.slice/crio-17c9b2a8de62eb5020077fa9526a60f64e843399cc80a1bbe52824beb3028829 WatchSource:0}: Error finding container 17c9b2a8de62eb5020077fa9526a60f64e843399cc80a1bbe52824beb3028829: Status 404 returned error can't find the container with id 17c9b2a8de62eb5020077fa9526a60f64e843399cc80a1bbe52824beb3028829 Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.783647 4787 generic.go:334] "Generic (PLEG): container finished" podID="391a21f6-7c95-41ff-9197-9ed01d35e73b" containerID="40a5f44d8467a9435485b29a34515f70ad41f84edfb257cc8246ef8156bb7e1c" exitCode=0 Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.783740 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dee3-account-create-update-zj75j" event={"ID":"391a21f6-7c95-41ff-9197-9ed01d35e73b","Type":"ContainerDied","Data":"40a5f44d8467a9435485b29a34515f70ad41f84edfb257cc8246ef8156bb7e1c"} Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.783939 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dee3-account-create-update-zj75j" event={"ID":"391a21f6-7c95-41ff-9197-9ed01d35e73b","Type":"ContainerStarted","Data":"5b73e3016e381622a41e0d965b05abb7facdc5fbc3fee271b5a8a633a900216a"} Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.789024 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e0f8-account-create-update-nqqt5" event={"ID":"f021b258-9578-4a25-af1f-2456434d0cda","Type":"ContainerStarted","Data":"82d697dced0239e8241f4d84cecf64d41679b7b1323a0e0ba8d9f1e602edbb31"} Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.789145 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e0f8-account-create-update-nqqt5" event={"ID":"f021b258-9578-4a25-af1f-2456434d0cda","Type":"ContainerStarted","Data":"17c9b2a8de62eb5020077fa9526a60f64e843399cc80a1bbe52824beb3028829"} Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.791416 4787 generic.go:334] "Generic (PLEG): container finished" podID="499a4ece-1afc-472d-9f39-76f56d1c8681" containerID="847559ad6f873e60f8c0719c850857931fb348773e273e63f65fe30b1ec026f0" exitCode=0 Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.791759 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k92qs" event={"ID":"499a4ece-1afc-472d-9f39-76f56d1c8681","Type":"ContainerDied","Data":"847559ad6f873e60f8c0719c850857931fb348773e273e63f65fe30b1ec026f0"} Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.791792 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k92qs" event={"ID":"499a4ece-1afc-472d-9f39-76f56d1c8681","Type":"ContainerStarted","Data":"c4c7dfd5f840efd6dc537b78173aeac5063e65312e78f5d24a663a84016de368"} Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.797146 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7xwdd" event={"ID":"f93a192d-1d61-41b8-aef2-d8badc0cb9df","Type":"ContainerStarted","Data":"dfb03cfdd6c75dcc1d308c835ace9090bb9e4ec6b5eeac917065641d760e1a38"} Jan 29 13:36:04 crc 
kubenswrapper[4787]: I0129 13:36:04.797180 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7xwdd" event={"ID":"f93a192d-1d61-41b8-aef2-d8badc0cb9df","Type":"ContainerStarted","Data":"c1cf36472e68672d8d2748ccda565bde240dd8443d6100261721470ef9e0f18d"} Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.883338 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-e0f8-account-create-update-nqqt5" podStartSLOduration=1.8833217759999998 podStartE2EDuration="1.883321776s" podCreationTimestamp="2026-01-29 13:36:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:04.881631517 +0000 UTC m=+1203.642891783" watchObservedRunningTime="2026-01-29 13:36:04.883321776 +0000 UTC m=+1203.644582052" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.884318 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-7xwdd" podStartSLOduration=1.884312969 podStartE2EDuration="1.884312969s" podCreationTimestamp="2026-01-29 13:36:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:04.852227309 +0000 UTC m=+1203.613487585" watchObservedRunningTime="2026-01-29 13:36:04.884312969 +0000 UTC m=+1203.645573245" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.899684 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-kxw2m"] Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.900920 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.930733 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-kxw2m"] Jan 29 13:36:04 crc kubenswrapper[4787]: I0129 13:36:04.948391 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-l5s5r"] Jan 29 13:36:04 crc kubenswrapper[4787]: W0129 13:36:04.963661 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbc05e24_bbf4_44e2_9cd3_40c095f56aea.slice/crio-bfaf393cc3cde2125c9cd0a3644e348cffbdc7df892e427eb2ebe744770a10aa WatchSource:0}: Error finding container bfaf393cc3cde2125c9cd0a3644e348cffbdc7df892e427eb2ebe744770a10aa: Status 404 returned error can't find the container with id bfaf393cc3cde2125c9cd0a3644e348cffbdc7df892e427eb2ebe744770a10aa Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.023499 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.023561 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj6gm\" (UniqueName: \"kubernetes.io/projected/c7b05d84-4045-491a-98d9-e974a5ad3d86-kube-api-access-vj6gm\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.023760 4787 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-config\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.023880 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.024065 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.125028 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.125084 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.125123 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj6gm\" (UniqueName: \"kubernetes.io/projected/c7b05d84-4045-491a-98d9-e974a5ad3d86-kube-api-access-vj6gm\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.125159 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-config\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.125199 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.126929 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.127942 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.128348 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-config\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.128415 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.146814 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj6gm\" (UniqueName: \"kubernetes.io/projected/c7b05d84-4045-491a-98d9-e974a5ad3d86-kube-api-access-vj6gm\") pod \"dnsmasq-dns-67fdf7998c-kxw2m\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.216071 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.686017 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-kxw2m"] Jan 29 13:36:05 crc kubenswrapper[4787]: W0129 13:36:05.694081 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7b05d84_4045_491a_98d9_e974a5ad3d86.slice/crio-c96b2a838d749f373a3776419bfd46a50e6bd086b5ab237ab7b73861df622fbe WatchSource:0}: Error finding container c96b2a838d749f373a3776419bfd46a50e6bd086b5ab237ab7b73861df622fbe: Status 404 returned error can't find the container with id c96b2a838d749f373a3776419bfd46a50e6bd086b5ab237ab7b73861df622fbe Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.806557 4787 generic.go:334] "Generic (PLEG): container finished" podID="f93a192d-1d61-41b8-aef2-d8badc0cb9df" containerID="dfb03cfdd6c75dcc1d308c835ace9090bb9e4ec6b5eeac917065641d760e1a38" exitCode=0 Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.806764 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7xwdd" event={"ID":"f93a192d-1d61-41b8-aef2-d8badc0cb9df","Type":"ContainerDied","Data":"dfb03cfdd6c75dcc1d308c835ace9090bb9e4ec6b5eeac917065641d760e1a38"} Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.808961 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" event={"ID":"c7b05d84-4045-491a-98d9-e974a5ad3d86","Type":"ContainerStarted","Data":"c96b2a838d749f373a3776419bfd46a50e6bd086b5ab237ab7b73861df622fbe"} Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.810031 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-l5s5r" event={"ID":"cbc05e24-bbf4-44e2-9cd3-40c095f56aea","Type":"ContainerStarted","Data":"bfaf393cc3cde2125c9cd0a3644e348cffbdc7df892e427eb2ebe744770a10aa"} Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.811930 
4787 generic.go:334] "Generic (PLEG): container finished" podID="f021b258-9578-4a25-af1f-2456434d0cda" containerID="82d697dced0239e8241f4d84cecf64d41679b7b1323a0e0ba8d9f1e602edbb31" exitCode=0 Jan 29 13:36:05 crc kubenswrapper[4787]: I0129 13:36:05.812008 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e0f8-account-create-update-nqqt5" event={"ID":"f021b258-9578-4a25-af1f-2456434d0cda","Type":"ContainerDied","Data":"82d697dced0239e8241f4d84cecf64d41679b7b1323a0e0ba8d9f1e602edbb31"} Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.012162 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.025310 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.030738 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.030938 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.031073 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.031241 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-2jq5c" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.034765 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.145684 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.145973 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-lock\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.146044 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-cache\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.146072 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.146113 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kxlb\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-kube-api-access-8kxlb\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 
13:36:06.146132 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.244851 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.247110 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kxlb\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-kube-api-access-8kxlb\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.247146 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.247199 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.247225 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-lock\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.247267 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-cache\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.247284 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: E0129 13:36:06.247407 4787 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 13:36:06 crc kubenswrapper[4787]: E0129 13:36:06.247424 4787 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 13:36:06 crc kubenswrapper[4787]: E0129 13:36:06.247474 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift podName:8d475a95-10b2-46bb-a74a-e96b6bf70bfe nodeName:}" failed. No retries permitted until 2026-01-29 13:36:06.74744623 +0000 UTC m=+1205.508706506 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift") pod "swift-storage-0" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe") : configmap "swift-ring-files" not found Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.247969 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.248049 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-lock\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.248119 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-cache\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.252399 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.274086 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kxlb\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-kube-api-access-8kxlb\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.278978 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.322790 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.348655 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/391a21f6-7c95-41ff-9197-9ed01d35e73b-operator-scripts\") pod \"391a21f6-7c95-41ff-9197-9ed01d35e73b\" (UID: \"391a21f6-7c95-41ff-9197-9ed01d35e73b\") " Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.348762 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wlc8\" (UniqueName: \"kubernetes.io/projected/391a21f6-7c95-41ff-9197-9ed01d35e73b-kube-api-access-2wlc8\") pod \"391a21f6-7c95-41ff-9197-9ed01d35e73b\" (UID: \"391a21f6-7c95-41ff-9197-9ed01d35e73b\") " Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.349312 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/391a21f6-7c95-41ff-9197-9ed01d35e73b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "391a21f6-7c95-41ff-9197-9ed01d35e73b" (UID: "391a21f6-7c95-41ff-9197-9ed01d35e73b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.354766 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/391a21f6-7c95-41ff-9197-9ed01d35e73b-kube-api-access-2wlc8" (OuterVolumeSpecName: "kube-api-access-2wlc8") pod "391a21f6-7c95-41ff-9197-9ed01d35e73b" (UID: "391a21f6-7c95-41ff-9197-9ed01d35e73b"). InnerVolumeSpecName "kube-api-access-2wlc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.450075 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gn8h\" (UniqueName: \"kubernetes.io/projected/499a4ece-1afc-472d-9f39-76f56d1c8681-kube-api-access-2gn8h\") pod \"499a4ece-1afc-472d-9f39-76f56d1c8681\" (UID: \"499a4ece-1afc-472d-9f39-76f56d1c8681\") " Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.450660 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/499a4ece-1afc-472d-9f39-76f56d1c8681-operator-scripts\") pod \"499a4ece-1afc-472d-9f39-76f56d1c8681\" (UID: \"499a4ece-1afc-472d-9f39-76f56d1c8681\") " Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.451025 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/391a21f6-7c95-41ff-9197-9ed01d35e73b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.451042 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wlc8\" (UniqueName: \"kubernetes.io/projected/391a21f6-7c95-41ff-9197-9ed01d35e73b-kube-api-access-2wlc8\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.452010 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/499a4ece-1afc-472d-9f39-76f56d1c8681-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "499a4ece-1afc-472d-9f39-76f56d1c8681" (UID: "499a4ece-1afc-472d-9f39-76f56d1c8681"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.454415 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/499a4ece-1afc-472d-9f39-76f56d1c8681-kube-api-access-2gn8h" (OuterVolumeSpecName: "kube-api-access-2gn8h") pod "499a4ece-1afc-472d-9f39-76f56d1c8681" (UID: "499a4ece-1afc-472d-9f39-76f56d1c8681"). InnerVolumeSpecName "kube-api-access-2gn8h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.553043 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/499a4ece-1afc-472d-9f39-76f56d1c8681-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.553091 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gn8h\" (UniqueName: \"kubernetes.io/projected/499a4ece-1afc-472d-9f39-76f56d1c8681-kube-api-access-2gn8h\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.689632 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-tjgpm"] Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.694788 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-tjgpm"] Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.756099 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:06 crc kubenswrapper[4787]: E0129 13:36:06.756244 4787 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 13:36:06 crc kubenswrapper[4787]: E0129 13:36:06.756335 4787 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 13:36:06 crc kubenswrapper[4787]: E0129 13:36:06.756387 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift podName:8d475a95-10b2-46bb-a74a-e96b6bf70bfe nodeName:}" failed. No retries permitted until 2026-01-29 13:36:07.756369896 +0000 UTC m=+1206.517630172 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift") pod "swift-storage-0" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe") : configmap "swift-ring-files" not found Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.828082 4787 generic.go:334] "Generic (PLEG): container finished" podID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerID="234ee080c589fad4dd9d9f3bb31393a993de18379e3d15dbfeb20ef186e88f88" exitCode=0 Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.828150 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" event={"ID":"c7b05d84-4045-491a-98d9-e974a5ad3d86","Type":"ContainerDied","Data":"234ee080c589fad4dd9d9f3bb31393a993de18379e3d15dbfeb20ef186e88f88"} Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.833258 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dee3-account-create-update-zj75j" event={"ID":"391a21f6-7c95-41ff-9197-9ed01d35e73b","Type":"ContainerDied","Data":"5b73e3016e381622a41e0d965b05abb7facdc5fbc3fee271b5a8a633a900216a"} Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.833291 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b73e3016e381622a41e0d965b05abb7facdc5fbc3fee271b5a8a633a900216a" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.833340 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dee3-account-create-update-zj75j" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.836254 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-k92qs" Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.837502 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-k92qs" event={"ID":"499a4ece-1afc-472d-9f39-76f56d1c8681","Type":"ContainerDied","Data":"c4c7dfd5f840efd6dc537b78173aeac5063e65312e78f5d24a663a84016de368"} Jan 29 13:36:06 crc kubenswrapper[4787]: I0129 13:36:06.837530 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4c7dfd5f840efd6dc537b78173aeac5063e65312e78f5d24a663a84016de368" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.333226 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.342753 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.474432 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpsbb\" (UniqueName: \"kubernetes.io/projected/f021b258-9578-4a25-af1f-2456434d0cda-kube-api-access-zpsbb\") pod \"f021b258-9578-4a25-af1f-2456434d0cda\" (UID: \"f021b258-9578-4a25-af1f-2456434d0cda\") " Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.474580 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tljj2\" (UniqueName: \"kubernetes.io/projected/f93a192d-1d61-41b8-aef2-d8badc0cb9df-kube-api-access-tljj2\") pod \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\" (UID: \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\") " Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.474692 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f93a192d-1d61-41b8-aef2-d8badc0cb9df-operator-scripts\") pod \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\" (UID: \"f93a192d-1d61-41b8-aef2-d8badc0cb9df\") " Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.474795 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f021b258-9578-4a25-af1f-2456434d0cda-operator-scripts\") pod \"f021b258-9578-4a25-af1f-2456434d0cda\" (UID: \"f021b258-9578-4a25-af1f-2456434d0cda\") " Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.476288 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f93a192d-1d61-41b8-aef2-d8badc0cb9df-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f93a192d-1d61-41b8-aef2-d8badc0cb9df" (UID: "f93a192d-1d61-41b8-aef2-d8badc0cb9df"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.476347 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f021b258-9578-4a25-af1f-2456434d0cda-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f021b258-9578-4a25-af1f-2456434d0cda" (UID: "f021b258-9578-4a25-af1f-2456434d0cda"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.481174 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f93a192d-1d61-41b8-aef2-d8badc0cb9df-kube-api-access-tljj2" (OuterVolumeSpecName: "kube-api-access-tljj2") pod "f93a192d-1d61-41b8-aef2-d8badc0cb9df" (UID: "f93a192d-1d61-41b8-aef2-d8badc0cb9df"). InnerVolumeSpecName "kube-api-access-tljj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.481302 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f021b258-9578-4a25-af1f-2456434d0cda-kube-api-access-zpsbb" (OuterVolumeSpecName: "kube-api-access-zpsbb") pod "f021b258-9578-4a25-af1f-2456434d0cda" (UID: "f021b258-9578-4a25-af1f-2456434d0cda"). InnerVolumeSpecName "kube-api-access-zpsbb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.576100 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f021b258-9578-4a25-af1f-2456434d0cda-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.576442 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpsbb\" (UniqueName: \"kubernetes.io/projected/f021b258-9578-4a25-af1f-2456434d0cda-kube-api-access-zpsbb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.576492 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tljj2\" (UniqueName: \"kubernetes.io/projected/f93a192d-1d61-41b8-aef2-d8badc0cb9df-kube-api-access-tljj2\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.576501 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f93a192d-1d61-41b8-aef2-d8badc0cb9df-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.779212 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:07 crc kubenswrapper[4787]: E0129 13:36:07.779478 4787 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 13:36:07 crc kubenswrapper[4787]: E0129 13:36:07.779493 4787 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 13:36:07 crc kubenswrapper[4787]: E0129 13:36:07.779532 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift podName:8d475a95-10b2-46bb-a74a-e96b6bf70bfe nodeName:}" failed. No retries permitted until 2026-01-29 13:36:09.779518359 +0000 UTC m=+1208.540778625 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift") pod "swift-storage-0" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe") : configmap "swift-ring-files" not found Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.848325 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" event={"ID":"c7b05d84-4045-491a-98d9-e974a5ad3d86","Type":"ContainerStarted","Data":"c89de210dc907d1b198039bb2de1c4745c6f38d0192e810a397149293e3411dd"} Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.848487 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.852097 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e0f8-account-create-update-nqqt5" event={"ID":"f021b258-9578-4a25-af1f-2456434d0cda","Type":"ContainerDied","Data":"17c9b2a8de62eb5020077fa9526a60f64e843399cc80a1bbe52824beb3028829"} Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.852147 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17c9b2a8de62eb5020077fa9526a60f64e843399cc80a1bbe52824beb3028829" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.852240 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e0f8-account-create-update-nqqt5" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.855320 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7xwdd" event={"ID":"f93a192d-1d61-41b8-aef2-d8badc0cb9df","Type":"ContainerDied","Data":"c1cf36472e68672d8d2748ccda565bde240dd8443d6100261721470ef9e0f18d"} Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.855353 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1cf36472e68672d8d2748ccda565bde240dd8443d6100261721470ef9e0f18d" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.855421 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-7xwdd" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.875843 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" podStartSLOduration=3.875829111 podStartE2EDuration="3.875829111s" podCreationTimestamp="2026-01-29 13:36:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:07.874555302 +0000 UTC m=+1206.635815588" watchObservedRunningTime="2026-01-29 13:36:07.875829111 +0000 UTC m=+1206.637089397" Jan 29 13:36:07 crc kubenswrapper[4787]: I0129 13:36:07.996267 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="246a790d-434e-4efe-aea0-a147051b15c4" path="/var/lib/kubelet/pods/246a790d-434e-4efe-aea0-a147051b15c4/volumes" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.817317 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:09 crc kubenswrapper[4787]: E0129 13:36:09.817555 4787 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 13:36:09 crc kubenswrapper[4787]: E0129 13:36:09.817592 4787 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 13:36:09 crc kubenswrapper[4787]: E0129 13:36:09.817727 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift podName:8d475a95-10b2-46bb-a74a-e96b6bf70bfe nodeName:}" failed. No retries permitted until 2026-01-29 13:36:13.817704949 +0000 UTC m=+1212.578965225 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift") pod "swift-storage-0" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe") : configmap "swift-ring-files" not found Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.934312 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-r9qrn"] Jan 29 13:36:09 crc kubenswrapper[4787]: E0129 13:36:09.934939 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="391a21f6-7c95-41ff-9197-9ed01d35e73b" containerName="mariadb-account-create-update" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.935049 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="391a21f6-7c95-41ff-9197-9ed01d35e73b" containerName="mariadb-account-create-update" Jan 29 13:36:09 crc kubenswrapper[4787]: E0129 13:36:09.935142 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="499a4ece-1afc-472d-9f39-76f56d1c8681" containerName="mariadb-database-create" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.935225 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="499a4ece-1afc-472d-9f39-76f56d1c8681" containerName="mariadb-database-create" Jan 29 13:36:09 crc kubenswrapper[4787]: E0129 13:36:09.935304 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f021b258-9578-4a25-af1f-2456434d0cda" containerName="mariadb-account-create-update" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.935385 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f021b258-9578-4a25-af1f-2456434d0cda" containerName="mariadb-account-create-update" Jan 29 13:36:09 crc kubenswrapper[4787]: E0129 13:36:09.935495 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f93a192d-1d61-41b8-aef2-d8badc0cb9df" containerName="mariadb-database-create" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.938400 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f93a192d-1d61-41b8-aef2-d8badc0cb9df" containerName="mariadb-database-create" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.938721 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f021b258-9578-4a25-af1f-2456434d0cda" containerName="mariadb-account-create-update" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.938822 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f93a192d-1d61-41b8-aef2-d8badc0cb9df" containerName="mariadb-database-create" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.938917 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="391a21f6-7c95-41ff-9197-9ed01d35e73b" containerName="mariadb-account-create-update" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.939010 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="499a4ece-1afc-472d-9f39-76f56d1c8681" containerName="mariadb-database-create" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.939693 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.941387 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.944657 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.944804 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.946221 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-r9qrn"] Jan 29 13:36:09 crc kubenswrapper[4787]: E0129 13:36:09.977958 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-w4zn5 ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-w4zn5 ring-data-devices scripts swiftconf]: context canceled" pod="openstack/swift-ring-rebalance-r9qrn" podUID="7e40265b-78ef-498f-9b7a-03b059d64a8d" Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.983348 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-9zlwc"] Jan 29 13:36:09 crc kubenswrapper[4787]: I0129 13:36:09.984392 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.000993 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-9zlwc"] Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.011081 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-r9qrn"] Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.020402 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7e40265b-78ef-498f-9b7a-03b059d64a8d-etc-swift\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.020558 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-scripts\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.020598 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4zn5\" (UniqueName: \"kubernetes.io/projected/7e40265b-78ef-498f-9b7a-03b059d64a8d-kube-api-access-w4zn5\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.020729 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-swiftconf\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 
13:36:10.020757 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-combined-ca-bundle\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.020865 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-dispersionconf\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.020925 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-ring-data-devices\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122445 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-ring-data-devices\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122525 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-swiftconf\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122545 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-combined-ca-bundle\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122597 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-dispersionconf\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122652 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-etc-swift\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122684 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-swiftconf\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc 
kubenswrapper[4787]: I0129 13:36:10.122703 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-dispersionconf\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122731 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-ring-data-devices\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122760 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-combined-ca-bundle\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122809 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g76g7\" (UniqueName: \"kubernetes.io/projected/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-kube-api-access-g76g7\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122845 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7e40265b-78ef-498f-9b7a-03b059d64a8d-etc-swift\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122862 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-scripts\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122902 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-scripts\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.122920 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4zn5\" (UniqueName: \"kubernetes.io/projected/7e40265b-78ef-498f-9b7a-03b059d64a8d-kube-api-access-w4zn5\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.124320 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7e40265b-78ef-498f-9b7a-03b059d64a8d-etc-swift\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.124643 4787 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-scripts\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.125042 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-ring-data-devices\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.129605 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-combined-ca-bundle\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.130041 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-swiftconf\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.137664 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-dispersionconf\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.143101 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4zn5\" (UniqueName: \"kubernetes.io/projected/7e40265b-78ef-498f-9b7a-03b059d64a8d-kube-api-access-w4zn5\") pod \"swift-ring-rebalance-r9qrn\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.225000 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-scripts\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.225144 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-ring-data-devices\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.225186 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-dispersionconf\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.225207 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/empty-dir/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-etc-swift\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.225236 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-swiftconf\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.225276 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-combined-ca-bundle\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.225311 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g76g7\" (UniqueName: \"kubernetes.io/projected/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-kube-api-access-g76g7\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.226512 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-etc-swift\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.226705 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-scripts\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.226873 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-ring-data-devices\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.229125 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-swiftconf\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.229756 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-dispersionconf\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.231204 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-combined-ca-bundle\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " 
pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.240939 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g76g7\" (UniqueName: \"kubernetes.io/projected/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-kube-api-access-g76g7\") pod \"swift-ring-rebalance-9zlwc\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.305857 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.789770 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-9zlwc"] Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.889624 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:10 crc kubenswrapper[4787]: I0129 13:36:10.902533 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.039510 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-swiftconf\") pod \"7e40265b-78ef-498f-9b7a-03b059d64a8d\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.039679 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-dispersionconf\") pod \"7e40265b-78ef-498f-9b7a-03b059d64a8d\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.039745 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7e40265b-78ef-498f-9b7a-03b059d64a8d-etc-swift\") pod \"7e40265b-78ef-498f-9b7a-03b059d64a8d\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.039805 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-ring-data-devices\") pod \"7e40265b-78ef-498f-9b7a-03b059d64a8d\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.039850 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-combined-ca-bundle\") pod \"7e40265b-78ef-498f-9b7a-03b059d64a8d\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.039918 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-scripts\") pod \"7e40265b-78ef-498f-9b7a-03b059d64a8d\" (UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.040009 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4zn5\" (UniqueName: \"kubernetes.io/projected/7e40265b-78ef-498f-9b7a-03b059d64a8d-kube-api-access-w4zn5\") pod \"7e40265b-78ef-498f-9b7a-03b059d64a8d\" 
(UID: \"7e40265b-78ef-498f-9b7a-03b059d64a8d\") " Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.040126 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e40265b-78ef-498f-9b7a-03b059d64a8d-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "7e40265b-78ef-498f-9b7a-03b059d64a8d" (UID: "7e40265b-78ef-498f-9b7a-03b059d64a8d"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.040294 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "7e40265b-78ef-498f-9b7a-03b059d64a8d" (UID: "7e40265b-78ef-498f-9b7a-03b059d64a8d"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.040422 4787 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/7e40265b-78ef-498f-9b7a-03b059d64a8d-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.040443 4787 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.040547 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-scripts" (OuterVolumeSpecName: "scripts") pod "7e40265b-78ef-498f-9b7a-03b059d64a8d" (UID: "7e40265b-78ef-498f-9b7a-03b059d64a8d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.044886 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e40265b-78ef-498f-9b7a-03b059d64a8d-kube-api-access-w4zn5" (OuterVolumeSpecName: "kube-api-access-w4zn5") pod "7e40265b-78ef-498f-9b7a-03b059d64a8d" (UID: "7e40265b-78ef-498f-9b7a-03b059d64a8d"). InnerVolumeSpecName "kube-api-access-w4zn5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.045123 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "7e40265b-78ef-498f-9b7a-03b059d64a8d" (UID: "7e40265b-78ef-498f-9b7a-03b059d64a8d"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.045213 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e40265b-78ef-498f-9b7a-03b059d64a8d" (UID: "7e40265b-78ef-498f-9b7a-03b059d64a8d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.045261 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "7e40265b-78ef-498f-9b7a-03b059d64a8d" (UID: "7e40265b-78ef-498f-9b7a-03b059d64a8d"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.141697 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4zn5\" (UniqueName: \"kubernetes.io/projected/7e40265b-78ef-498f-9b7a-03b059d64a8d-kube-api-access-w4zn5\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.141733 4787 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.141743 4787 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.141768 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e40265b-78ef-498f-9b7a-03b059d64a8d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.141776 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e40265b-78ef-498f-9b7a-03b059d64a8d-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.696581 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-9f5q7"] Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.699013 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.702802 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.708047 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-9f5q7"] Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.753855 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65702811-f528-4d05-a240-af4d28db992b-operator-scripts\") pod \"root-account-create-update-9f5q7\" (UID: \"65702811-f528-4d05-a240-af4d28db992b\") " pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.754219 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdql8\" (UniqueName: \"kubernetes.io/projected/65702811-f528-4d05-a240-af4d28db992b-kube-api-access-pdql8\") pod \"root-account-create-update-9f5q7\" (UID: \"65702811-f528-4d05-a240-af4d28db992b\") " pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.855530 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdql8\" (UniqueName: \"kubernetes.io/projected/65702811-f528-4d05-a240-af4d28db992b-kube-api-access-pdql8\") pod \"root-account-create-update-9f5q7\" (UID: \"65702811-f528-4d05-a240-af4d28db992b\") " pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.855662 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65702811-f528-4d05-a240-af4d28db992b-operator-scripts\") pod \"root-account-create-update-9f5q7\" (UID: \"65702811-f528-4d05-a240-af4d28db992b\") " pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.856743 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65702811-f528-4d05-a240-af4d28db992b-operator-scripts\") pod \"root-account-create-update-9f5q7\" (UID: \"65702811-f528-4d05-a240-af4d28db992b\") " pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.872083 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdql8\" (UniqueName: \"kubernetes.io/projected/65702811-f528-4d05-a240-af4d28db992b-kube-api-access-pdql8\") pod \"root-account-create-update-9f5q7\" (UID: \"65702811-f528-4d05-a240-af4d28db992b\") " pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.897808 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-r9qrn" Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.969675 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-r9qrn"] Jan 29 13:36:11 crc kubenswrapper[4787]: I0129 13:36:11.977500 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-r9qrn"] Jan 29 13:36:12 crc kubenswrapper[4787]: I0129 13:36:12.011196 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e40265b-78ef-498f-9b7a-03b059d64a8d" path="/var/lib/kubelet/pods/7e40265b-78ef-498f-9b7a-03b059d64a8d/volumes" Jan 29 13:36:12 crc kubenswrapper[4787]: I0129 13:36:12.023588 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:13 crc kubenswrapper[4787]: I0129 13:36:13.090035 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 29 13:36:13 crc kubenswrapper[4787]: I0129 13:36:13.887769 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:13 crc kubenswrapper[4787]: E0129 13:36:13.887954 4787 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 13:36:13 crc kubenswrapper[4787]: E0129 13:36:13.887983 4787 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 13:36:13 crc kubenswrapper[4787]: E0129 13:36:13.888058 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift podName:8d475a95-10b2-46bb-a74a-e96b6bf70bfe nodeName:}" failed. No retries permitted until 2026-01-29 13:36:21.888041601 +0000 UTC m=+1220.649301877 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift") pod "swift-storage-0" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe") : configmap "swift-ring-files" not found Jan 29 13:36:13 crc kubenswrapper[4787]: I0129 13:36:13.934003 4787 generic.go:334] "Generic (PLEG): container finished" podID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" containerID="a6c0ad3143ab1f25e2f8fefaf9710ebb5e0dda180c2744c012d09871106cf7a3" exitCode=0 Jan 29 13:36:13 crc kubenswrapper[4787]: I0129 13:36:13.934072 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6285155e-2d1b-4c6f-be33-5f2681a7b5e0","Type":"ContainerDied","Data":"a6c0ad3143ab1f25e2f8fefaf9710ebb5e0dda180c2744c012d09871106cf7a3"} Jan 29 13:36:13 crc kubenswrapper[4787]: I0129 13:36:13.936229 4787 generic.go:334] "Generic (PLEG): container finished" podID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerID="fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413" exitCode=0 Jan 29 13:36:13 crc kubenswrapper[4787]: I0129 13:36:13.936261 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5","Type":"ContainerDied","Data":"fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413"} Jan 29 13:36:15 crc kubenswrapper[4787]: I0129 13:36:15.217801 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:15 crc kubenswrapper[4787]: I0129 13:36:15.322862 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-stgl5"] Jan 29 13:36:15 crc kubenswrapper[4787]: I0129 13:36:15.323208 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" podUID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerName="dnsmasq-dns" containerID="cri-o://c3dea469118180458bfbe46e770e1fb9c15e0b551961c69cd7dcd6c6ea961cd2" gracePeriod=10 Jan 29 13:36:15 crc kubenswrapper[4787]: I0129 13:36:15.953937 4787 generic.go:334] "Generic (PLEG): container finished" podID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerID="c3dea469118180458bfbe46e770e1fb9c15e0b551961c69cd7dcd6c6ea961cd2" exitCode=0 Jan 29 13:36:15 crc kubenswrapper[4787]: I0129 13:36:15.953992 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" event={"ID":"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16","Type":"ContainerDied","Data":"c3dea469118180458bfbe46e770e1fb9c15e0b551961c69cd7dcd6c6ea961cd2"} Jan 29 13:36:17 crc kubenswrapper[4787]: I0129 13:36:17.418187 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" podUID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.113:5353: connect: connection refused" Jan 29 13:36:18 crc kubenswrapper[4787]: I0129 13:36:18.677041 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-hz6gf" podUID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" containerName="ovn-controller" probeResult="failure" output=< Jan 29 13:36:18 crc kubenswrapper[4787]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 29 13:36:18 crc kubenswrapper[4787]: > Jan 29 13:36:18 crc kubenswrapper[4787]: I0129 13:36:18.748048 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:36:18 crc kubenswrapper[4787]: I0129 13:36:18.753801 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:36:18 crc kubenswrapper[4787]: W0129 13:36:18.796439 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfdeb5cf3_4f9a_4f9d_8559_648f5079397a.slice/crio-7f04ee4e264f861002ac1f7c347a1d1a8178f07ebf20cc23b11d4cbf67c6085f WatchSource:0}: Error finding container 7f04ee4e264f861002ac1f7c347a1d1a8178f07ebf20cc23b11d4cbf67c6085f: Status 404 returned error can't find the container with id 7f04ee4e264f861002ac1f7c347a1d1a8178f07ebf20cc23b11d4cbf67c6085f Jan 29 13:36:18 crc kubenswrapper[4787]: I0129 13:36:18.996745 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9zlwc" event={"ID":"fdeb5cf3-4f9a-4f9d-8559-648f5079397a","Type":"ContainerStarted","Data":"7f04ee4e264f861002ac1f7c347a1d1a8178f07ebf20cc23b11d4cbf67c6085f"} Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.028838 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-hz6gf-config-zv5xb"] Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.029990 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.032524 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hz6gf-config-zv5xb"] Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.047956 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.097173 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-log-ovn\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.097238 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpzqv\" (UniqueName: \"kubernetes.io/projected/29912110-dfbe-401f-aba1-0cd16e30cfb6-kube-api-access-wpzqv\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.097301 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run-ovn\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.097348 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-additional-scripts\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.097491 4787 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-scripts\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.097572 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.187436 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199050 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-scripts\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199136 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199218 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-log-ovn\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199266 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpzqv\" (UniqueName: \"kubernetes.io/projected/29912110-dfbe-401f-aba1-0cd16e30cfb6-kube-api-access-wpzqv\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199304 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run-ovn\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199344 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-additional-scripts\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199837 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-log-ovn\") pod 
\"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199936 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.199991 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run-ovn\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.200629 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-additional-scripts\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.201567 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-scripts\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.227482 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpzqv\" (UniqueName: \"kubernetes.io/projected/29912110-dfbe-401f-aba1-0cd16e30cfb6-kube-api-access-wpzqv\") pod \"ovn-controller-hz6gf-config-zv5xb\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.300106 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddf76\" (UniqueName: \"kubernetes.io/projected/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-kube-api-access-ddf76\") pod \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.300146 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-nb\") pod \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.300170 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-sb\") pod \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.300266 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-config\") pod \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 
13:36:19.300281 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-dns-svc\") pod \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\" (UID: \"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16\") " Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.307720 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-kube-api-access-ddf76" (OuterVolumeSpecName: "kube-api-access-ddf76") pod "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" (UID: "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16"). InnerVolumeSpecName "kube-api-access-ddf76". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.341623 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" (UID: "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.348278 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-config" (OuterVolumeSpecName: "config") pod "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" (UID: "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.348333 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" (UID: "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.382395 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" (UID: "87c258a2-fcd7-47eb-a3a0-a8a1475c6a16"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.389724 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.402299 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.402332 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.402348 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddf76\" (UniqueName: \"kubernetes.io/projected/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-kube-api-access-ddf76\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.402359 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.402371 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.422066 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-9f5q7"] Jan 29 13:36:19 crc kubenswrapper[4787]: I0129 13:36:19.873353 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hz6gf-config-zv5xb"] Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.006687 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf-config-zv5xb" event={"ID":"29912110-dfbe-401f-aba1-0cd16e30cfb6","Type":"ContainerStarted","Data":"73c09076a730004d0e97ee0c94b37cb907413f07ceb9bd305d605f74f3920dac"} Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.013046 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-l5s5r" event={"ID":"cbc05e24-bbf4-44e2-9cd3-40c095f56aea","Type":"ContainerStarted","Data":"ba5db01e0916c4c88d6a2536d7cef2d0569d87e7483b40333d2ad311475774dd"} Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.021117 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" event={"ID":"87c258a2-fcd7-47eb-a3a0-a8a1475c6a16","Type":"ContainerDied","Data":"480bf948802b065e810f1873e0dd247c961894a089287a02d7f759659cf6424d"} Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.021767 4787 scope.go:117] "RemoveContainer" containerID="c3dea469118180458bfbe46e770e1fb9c15e0b551961c69cd7dcd6c6ea961cd2" Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.021137 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-stgl5" Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.023263 4787 generic.go:334] "Generic (PLEG): container finished" podID="65702811-f528-4d05-a240-af4d28db992b" containerID="468de5bd92c2ecc0ab49c8f14f45a5dcafa2c582c73d2fb214ca85a44e720357" exitCode=0 Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.023342 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-9f5q7" event={"ID":"65702811-f528-4d05-a240-af4d28db992b","Type":"ContainerDied","Data":"468de5bd92c2ecc0ab49c8f14f45a5dcafa2c582c73d2fb214ca85a44e720357"} Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.023367 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-9f5q7" event={"ID":"65702811-f528-4d05-a240-af4d28db992b","Type":"ContainerStarted","Data":"d1035cec672a10b7bc55b8ad19f7382e3250f032d6e7c26cc1c863060433d6b6"} Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.032513 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5","Type":"ContainerStarted","Data":"e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d"} Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.032889 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.043618 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6285155e-2d1b-4c6f-be33-5f2681a7b5e0","Type":"ContainerStarted","Data":"19173a801f2b7b195813a7a47563e2f75b1704c2dc99324f6bc865c02b1775b4"} Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.043850 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.043865 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-l5s5r" podStartSLOduration=3.089003173 podStartE2EDuration="17.043848613s" podCreationTimestamp="2026-01-29 13:36:03 +0000 UTC" firstStartedPulling="2026-01-29 13:36:04.965682427 +0000 UTC m=+1203.726942703" lastFinishedPulling="2026-01-29 13:36:18.920527867 +0000 UTC m=+1217.681788143" observedRunningTime="2026-01-29 13:36:20.02768727 +0000 UTC m=+1218.788947546" watchObservedRunningTime="2026-01-29 13:36:20.043848613 +0000 UTC m=+1218.805108889" Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.048385 4787 scope.go:117] "RemoveContainer" containerID="bfcbe8d49851c9ea391b6ad1eed625616efe4028f3faea7bd0158cc4417c4ab3" Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.072018 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-stgl5"] Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.081264 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-stgl5"] Jan 29 13:36:20 crc kubenswrapper[4787]: I0129 13:36:20.131199 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=42.070867423 podStartE2EDuration="1m12.131181029s" podCreationTimestamp="2026-01-29 13:35:08 +0000 UTC" firstStartedPulling="2026-01-29 13:35:10.335983443 +0000 UTC m=+1149.097243709" lastFinishedPulling="2026-01-29 13:35:40.396297039 +0000 UTC m=+1179.157557315" observedRunningTime="2026-01-29 13:36:20.098168537 +0000 UTC 
m=+1218.859428833" watchObservedRunningTime="2026-01-29 13:36:20.131181029 +0000 UTC m=+1218.892441305" Jan 29 13:36:21 crc kubenswrapper[4787]: I0129 13:36:21.052791 4787 generic.go:334] "Generic (PLEG): container finished" podID="29912110-dfbe-401f-aba1-0cd16e30cfb6" containerID="392454853d694c87aee62f7a61f6214cfe1b02f5a83cef2c0a2fb8b1edcea033" exitCode=0 Jan 29 13:36:21 crc kubenswrapper[4787]: I0129 13:36:21.052902 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf-config-zv5xb" event={"ID":"29912110-dfbe-401f-aba1-0cd16e30cfb6","Type":"ContainerDied","Data":"392454853d694c87aee62f7a61f6214cfe1b02f5a83cef2c0a2fb8b1edcea033"} Jan 29 13:36:21 crc kubenswrapper[4787]: I0129 13:36:21.079241 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=43.48163685 podStartE2EDuration="1m12.079219629s" podCreationTimestamp="2026-01-29 13:35:09 +0000 UTC" firstStartedPulling="2026-01-29 13:35:10.882625687 +0000 UTC m=+1149.643885963" lastFinishedPulling="2026-01-29 13:35:39.480208426 +0000 UTC m=+1178.241468742" observedRunningTime="2026-01-29 13:36:20.129125751 +0000 UTC m=+1218.890386027" watchObservedRunningTime="2026-01-29 13:36:21.079219629 +0000 UTC m=+1219.840479905" Jan 29 13:36:21 crc kubenswrapper[4787]: I0129 13:36:21.968747 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:21 crc kubenswrapper[4787]: E0129 13:36:21.968974 4787 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 29 13:36:21 crc kubenswrapper[4787]: E0129 13:36:21.969005 4787 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 29 13:36:21 crc kubenswrapper[4787]: E0129 13:36:21.969065 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift podName:8d475a95-10b2-46bb-a74a-e96b6bf70bfe nodeName:}" failed. No retries permitted until 2026-01-29 13:36:37.969047775 +0000 UTC m=+1236.730308051 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift") pod "swift-storage-0" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe") : configmap "swift-ring-files" not found Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.008264 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" path="/var/lib/kubelet/pods/87c258a2-fcd7-47eb-a3a0-a8a1475c6a16/volumes" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.538083 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.546356 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.682135 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run-ovn\") pod \"29912110-dfbe-401f-aba1-0cd16e30cfb6\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.682201 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run\") pod \"29912110-dfbe-401f-aba1-0cd16e30cfb6\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.682264 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65702811-f528-4d05-a240-af4d28db992b-operator-scripts\") pod \"65702811-f528-4d05-a240-af4d28db992b\" (UID: \"65702811-f528-4d05-a240-af4d28db992b\") " Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.682280 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-log-ovn\") pod \"29912110-dfbe-401f-aba1-0cd16e30cfb6\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.682319 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdql8\" (UniqueName: \"kubernetes.io/projected/65702811-f528-4d05-a240-af4d28db992b-kube-api-access-pdql8\") pod \"65702811-f528-4d05-a240-af4d28db992b\" (UID: \"65702811-f528-4d05-a240-af4d28db992b\") " Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.682419 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-scripts\") pod \"29912110-dfbe-401f-aba1-0cd16e30cfb6\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.682443 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpzqv\" (UniqueName: \"kubernetes.io/projected/29912110-dfbe-401f-aba1-0cd16e30cfb6-kube-api-access-wpzqv\") pod \"29912110-dfbe-401f-aba1-0cd16e30cfb6\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.684038 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-additional-scripts\") pod \"29912110-dfbe-401f-aba1-0cd16e30cfb6\" (UID: \"29912110-dfbe-401f-aba1-0cd16e30cfb6\") " Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.684267 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65702811-f528-4d05-a240-af4d28db992b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "65702811-f528-4d05-a240-af4d28db992b" (UID: "65702811-f528-4d05-a240-af4d28db992b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.684268 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "29912110-dfbe-401f-aba1-0cd16e30cfb6" (UID: "29912110-dfbe-401f-aba1-0cd16e30cfb6"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.684317 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run" (OuterVolumeSpecName: "var-run") pod "29912110-dfbe-401f-aba1-0cd16e30cfb6" (UID: "29912110-dfbe-401f-aba1-0cd16e30cfb6"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.684786 4787 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.684802 4787 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.684812 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65702811-f528-4d05-a240-af4d28db992b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.684962 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "29912110-dfbe-401f-aba1-0cd16e30cfb6" (UID: "29912110-dfbe-401f-aba1-0cd16e30cfb6"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.685421 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "29912110-dfbe-401f-aba1-0cd16e30cfb6" (UID: "29912110-dfbe-401f-aba1-0cd16e30cfb6"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.685567 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-scripts" (OuterVolumeSpecName: "scripts") pod "29912110-dfbe-401f-aba1-0cd16e30cfb6" (UID: "29912110-dfbe-401f-aba1-0cd16e30cfb6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.689752 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29912110-dfbe-401f-aba1-0cd16e30cfb6-kube-api-access-wpzqv" (OuterVolumeSpecName: "kube-api-access-wpzqv") pod "29912110-dfbe-401f-aba1-0cd16e30cfb6" (UID: "29912110-dfbe-401f-aba1-0cd16e30cfb6"). InnerVolumeSpecName "kube-api-access-wpzqv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.690384 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65702811-f528-4d05-a240-af4d28db992b-kube-api-access-pdql8" (OuterVolumeSpecName: "kube-api-access-pdql8") pod "65702811-f528-4d05-a240-af4d28db992b" (UID: "65702811-f528-4d05-a240-af4d28db992b"). InnerVolumeSpecName "kube-api-access-pdql8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.786932 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpzqv\" (UniqueName: \"kubernetes.io/projected/29912110-dfbe-401f-aba1-0cd16e30cfb6-kube-api-access-wpzqv\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.786964 4787 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.786977 4787 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/29912110-dfbe-401f-aba1-0cd16e30cfb6-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.786988 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdql8\" (UniqueName: \"kubernetes.io/projected/65702811-f528-4d05-a240-af4d28db992b-kube-api-access-pdql8\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:22 crc kubenswrapper[4787]: I0129 13:36:22.786999 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29912110-dfbe-401f-aba1-0cd16e30cfb6-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.076671 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf-config-zv5xb" event={"ID":"29912110-dfbe-401f-aba1-0cd16e30cfb6","Type":"ContainerDied","Data":"73c09076a730004d0e97ee0c94b37cb907413f07ceb9bd305d605f74f3920dac"} Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.076723 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73c09076a730004d0e97ee0c94b37cb907413f07ceb9bd305d605f74f3920dac" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.076686 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf-config-zv5xb" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.078271 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-9f5q7" event={"ID":"65702811-f528-4d05-a240-af4d28db992b","Type":"ContainerDied","Data":"d1035cec672a10b7bc55b8ad19f7382e3250f032d6e7c26cc1c863060433d6b6"} Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.078302 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d1035cec672a10b7bc55b8ad19f7382e3250f032d6e7c26cc1c863060433d6b6" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.078338 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-9f5q7" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.660888 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-hz6gf-config-zv5xb"] Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.669496 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-hz6gf-config-zv5xb"] Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.679582 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-hz6gf" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.789585 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-hz6gf-config-n5567"] Jan 29 13:36:23 crc kubenswrapper[4787]: E0129 13:36:23.789918 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerName="init" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.789934 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerName="init" Jan 29 13:36:23 crc kubenswrapper[4787]: E0129 13:36:23.789966 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerName="dnsmasq-dns" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.789973 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerName="dnsmasq-dns" Jan 29 13:36:23 crc kubenswrapper[4787]: E0129 13:36:23.789985 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29912110-dfbe-401f-aba1-0cd16e30cfb6" containerName="ovn-config" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.789991 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="29912110-dfbe-401f-aba1-0cd16e30cfb6" containerName="ovn-config" Jan 29 13:36:23 crc kubenswrapper[4787]: E0129 13:36:23.790000 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65702811-f528-4d05-a240-af4d28db992b" containerName="mariadb-account-create-update" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.790016 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="65702811-f528-4d05-a240-af4d28db992b" containerName="mariadb-account-create-update" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.790187 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="29912110-dfbe-401f-aba1-0cd16e30cfb6" containerName="ovn-config" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.790211 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="87c258a2-fcd7-47eb-a3a0-a8a1475c6a16" containerName="dnsmasq-dns" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.790224 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="65702811-f528-4d05-a240-af4d28db992b" containerName="mariadb-account-create-update" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.790762 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.792926 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.804188 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hz6gf-config-n5567"] Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.907952 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-additional-scripts\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.908006 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-scripts\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.908052 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-log-ovn\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.908079 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run-ovn\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.908099 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vn8rz\" (UniqueName: \"kubernetes.io/projected/9b423d73-24ec-4f6b-8c78-cf3daee64256-kube-api-access-vn8rz\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.908146 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:23 crc kubenswrapper[4787]: I0129 13:36:23.995076 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29912110-dfbe-401f-aba1-0cd16e30cfb6" path="/var/lib/kubelet/pods/29912110-dfbe-401f-aba1-0cd16e30cfb6/volumes" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.009278 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-scripts\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 
13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.009501 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-log-ovn\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.009613 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run-ovn\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.009720 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vn8rz\" (UniqueName: \"kubernetes.io/projected/9b423d73-24ec-4f6b-8c78-cf3daee64256-kube-api-access-vn8rz\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.010034 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-log-ovn\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.010135 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run-ovn\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.010240 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.010425 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.010582 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-additional-scripts\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.011393 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-additional-scripts\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc 
kubenswrapper[4787]: I0129 13:36:24.011644 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-scripts\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.028241 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vn8rz\" (UniqueName: \"kubernetes.io/projected/9b423d73-24ec-4f6b-8c78-cf3daee64256-kube-api-access-vn8rz\") pod \"ovn-controller-hz6gf-config-n5567\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") " pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.105435 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf-config-n5567" Jan 29 13:36:24 crc kubenswrapper[4787]: I0129 13:36:24.562956 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hz6gf-config-n5567"] Jan 29 13:36:25 crc kubenswrapper[4787]: I0129 13:36:25.094304 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9zlwc" event={"ID":"fdeb5cf3-4f9a-4f9d-8559-648f5079397a","Type":"ContainerStarted","Data":"72b2ec0c4cd5c043ceebdfa345faa2e0f31d571e8b19d5ab485f4d1a72bf4bc4"} Jan 29 13:36:25 crc kubenswrapper[4787]: I0129 13:36:25.096647 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf-config-n5567" event={"ID":"9b423d73-24ec-4f6b-8c78-cf3daee64256","Type":"ContainerStarted","Data":"785c1e7301248da493293904a73624de786cfb2556a39349b67654eabdef4cce"} Jan 29 13:36:25 crc kubenswrapper[4787]: I0129 13:36:25.096995 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf-config-n5567" event={"ID":"9b423d73-24ec-4f6b-8c78-cf3daee64256","Type":"ContainerStarted","Data":"83fbff49b7fc56ed4092b155f7298160d285e5aa2da26d0e0c4036c3e31986cb"} Jan 29 13:36:25 crc kubenswrapper[4787]: I0129 13:36:25.118563 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-9zlwc" podStartSLOduration=11.130357118 podStartE2EDuration="16.118543374s" podCreationTimestamp="2026-01-29 13:36:09 +0000 UTC" firstStartedPulling="2026-01-29 13:36:18.855978077 +0000 UTC m=+1217.617238353" lastFinishedPulling="2026-01-29 13:36:23.844164333 +0000 UTC m=+1222.605424609" observedRunningTime="2026-01-29 13:36:25.117446949 +0000 UTC m=+1223.878707225" watchObservedRunningTime="2026-01-29 13:36:25.118543374 +0000 UTC m=+1223.879803660" Jan 29 13:36:25 crc kubenswrapper[4787]: I0129 13:36:25.150931 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-hz6gf-config-n5567" podStartSLOduration=2.150907821 podStartE2EDuration="2.150907821s" podCreationTimestamp="2026-01-29 13:36:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:25.144036602 +0000 UTC m=+1223.905296878" watchObservedRunningTime="2026-01-29 13:36:25.150907821 +0000 UTC m=+1223.912168087" Jan 29 13:36:26 crc kubenswrapper[4787]: I0129 13:36:26.105896 4787 generic.go:334] "Generic (PLEG): container finished" podID="9b423d73-24ec-4f6b-8c78-cf3daee64256" containerID="785c1e7301248da493293904a73624de786cfb2556a39349b67654eabdef4cce" exitCode=0 Jan 29 13:36:26 crc 
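
The two "Observed pod startup duration" entries above are internally consistent: podStartE2EDuration equals watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration equals that figure minus the image-pull window (lastFinishedPulling - firstStartedPulling). For ovn-controller-hz6gf-config-n5567 both pull timestamps are the zero time, so the two durations coincide (2.150907821s). A short Go check using the swift-ring-rebalance-9zlwc timestamps copied from the log (the relation is inferred from the logged values themselves, not quoted from kubelet source):

package main

import (
	"fmt"
	"time"
)

// layout matches the timestamp format in the entries above (Go's default
// time.Time string form without the monotonic suffix).
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-29 13:36:09 +0000 UTC")
	firstPull := mustParse("2026-01-29 13:36:18.855978077 +0000 UTC")
	lastPull := mustParse("2026-01-29 13:36:23.844164333 +0000 UTC")
	watched := mustParse("2026-01-29 13:36:25.118543374 +0000 UTC")

	e2e := watched.Sub(created)          // creation -> watch-observed running
	slo := e2e - lastPull.Sub(firstPull) // SLO figure excludes image pulling

	fmt.Println(e2e, slo) // 16.118543374s 11.130357118s, matching the entry above
}
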
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.483592 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf-config-n5567"
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.667772 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run\") pod \"9b423d73-24ec-4f6b-8c78-cf3daee64256\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") "
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.667844 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-additional-scripts\") pod \"9b423d73-24ec-4f6b-8c78-cf3daee64256\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") "
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.667886 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-log-ovn\") pod \"9b423d73-24ec-4f6b-8c78-cf3daee64256\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") "
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.667893 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run" (OuterVolumeSpecName: "var-run") pod "9b423d73-24ec-4f6b-8c78-cf3daee64256" (UID: "9b423d73-24ec-4f6b-8c78-cf3daee64256"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.667928 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-scripts\") pod \"9b423d73-24ec-4f6b-8c78-cf3daee64256\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") "
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668001 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "9b423d73-24ec-4f6b-8c78-cf3daee64256" (UID: "9b423d73-24ec-4f6b-8c78-cf3daee64256"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668055 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run-ovn\") pod \"9b423d73-24ec-4f6b-8c78-cf3daee64256\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") "
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668075 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "9b423d73-24ec-4f6b-8c78-cf3daee64256" (UID: "9b423d73-24ec-4f6b-8c78-cf3daee64256"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668155 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vn8rz\" (UniqueName: \"kubernetes.io/projected/9b423d73-24ec-4f6b-8c78-cf3daee64256-kube-api-access-vn8rz\") pod \"9b423d73-24ec-4f6b-8c78-cf3daee64256\" (UID: \"9b423d73-24ec-4f6b-8c78-cf3daee64256\") "
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668814 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "9b423d73-24ec-4f6b-8c78-cf3daee64256" (UID: "9b423d73-24ec-4f6b-8c78-cf3daee64256"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668961 4787 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668974 4787 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668984 4787 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668995 4787 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b423d73-24ec-4f6b-8c78-cf3daee64256-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.668999 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-scripts" (OuterVolumeSpecName: "scripts") pod "9b423d73-24ec-4f6b-8c78-cf3daee64256" (UID: "9b423d73-24ec-4f6b-8c78-cf3daee64256"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.673444 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b423d73-24ec-4f6b-8c78-cf3daee64256-kube-api-access-vn8rz" (OuterVolumeSpecName: "kube-api-access-vn8rz") pod "9b423d73-24ec-4f6b-8c78-cf3daee64256" (UID: "9b423d73-24ec-4f6b-8c78-cf3daee64256"). InnerVolumeSpecName "kube-api-access-vn8rz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.770687 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b423d73-24ec-4f6b-8c78-cf3daee64256-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:27 crc kubenswrapper[4787]: I0129 13:36:27.770724 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vn8rz\" (UniqueName: \"kubernetes.io/projected/9b423d73-24ec-4f6b-8c78-cf3daee64256-kube-api-access-vn8rz\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:28 crc kubenswrapper[4787]: I0129 13:36:28.120224 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf-config-n5567" event={"ID":"9b423d73-24ec-4f6b-8c78-cf3daee64256","Type":"ContainerDied","Data":"83fbff49b7fc56ed4092b155f7298160d285e5aa2da26d0e0c4036c3e31986cb"}
Jan 29 13:36:28 crc kubenswrapper[4787]: I0129 13:36:28.120266 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83fbff49b7fc56ed4092b155f7298160d285e5aa2da26d0e0c4036c3e31986cb"
Jan 29 13:36:28 crc kubenswrapper[4787]: I0129 13:36:28.120280 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf-config-n5567"
Jan 29 13:36:28 crc kubenswrapper[4787]: I0129 13:36:28.586154 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-hz6gf-config-n5567"]
Jan 29 13:36:28 crc kubenswrapper[4787]: I0129 13:36:28.592800 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-hz6gf-config-n5567"]
Jan 29 13:36:29 crc kubenswrapper[4787]: I0129 13:36:29.980672 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 29 13:36:29 crc kubenswrapper[4787]: I0129 13:36:29.997281 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b423d73-24ec-4f6b-8c78-cf3daee64256" path="/var/lib/kubelet/pods/9b423d73-24ec-4f6b-8c78-cf3daee64256/volumes"
Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.374471 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-pvfrj"]
Jan 29 13:36:30 crc kubenswrapper[4787]: E0129 13:36:30.374996 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b423d73-24ec-4f6b-8c78-cf3daee64256" containerName="ovn-config"
Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.375011 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b423d73-24ec-4f6b-8c78-cf3daee64256" containerName="ovn-config"
Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.375160 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b423d73-24ec-4f6b-8c78-cf3daee64256" containerName="ovn-config"
Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.375656 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-pvfrj"
Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.394602 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-fedf-account-create-update-qqj9j"]
Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.395861 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fedf-account-create-update-qqj9j"
Need to start a new one" pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.398090 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.412995 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-pvfrj"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.418641 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fedf-account-create-update-qqj9j"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.427617 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.474241 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-6fbps"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.475223 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.499890 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-6fbps"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.515951 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4xh2\" (UniqueName: \"kubernetes.io/projected/9dad383e-4d7c-485c-8a28-cf87a91f3370-kube-api-access-s4xh2\") pod \"cinder-fedf-account-create-update-qqj9j\" (UID: \"9dad383e-4d7c-485c-8a28-cf87a91f3370\") " pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.516038 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dad383e-4d7c-485c-8a28-cf87a91f3370-operator-scripts\") pod \"cinder-fedf-account-create-update-qqj9j\" (UID: \"9dad383e-4d7c-485c-8a28-cf87a91f3370\") " pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.516066 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/012c4748-24f7-48bd-983a-481f3f544724-operator-scripts\") pod \"cinder-db-create-pvfrj\" (UID: \"012c4748-24f7-48bd-983a-481f3f544724\") " pod="openstack/cinder-db-create-pvfrj" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.516337 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwbkf\" (UniqueName: \"kubernetes.io/projected/012c4748-24f7-48bd-983a-481f3f544724-kube-api-access-hwbkf\") pod \"cinder-db-create-pvfrj\" (UID: \"012c4748-24f7-48bd-983a-481f3f544724\") " pod="openstack/cinder-db-create-pvfrj" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.586837 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-04f3-account-create-update-6ckbf"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.588045 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.590325 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.596150 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-04f3-account-create-update-6ckbf"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.617954 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dv44l\" (UniqueName: \"kubernetes.io/projected/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-kube-api-access-dv44l\") pod \"barbican-db-create-6fbps\" (UID: \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\") " pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.618040 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwbkf\" (UniqueName: \"kubernetes.io/projected/012c4748-24f7-48bd-983a-481f3f544724-kube-api-access-hwbkf\") pod \"cinder-db-create-pvfrj\" (UID: \"012c4748-24f7-48bd-983a-481f3f544724\") " pod="openstack/cinder-db-create-pvfrj" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.618082 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4xh2\" (UniqueName: \"kubernetes.io/projected/9dad383e-4d7c-485c-8a28-cf87a91f3370-kube-api-access-s4xh2\") pod \"cinder-fedf-account-create-update-qqj9j\" (UID: \"9dad383e-4d7c-485c-8a28-cf87a91f3370\") " pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.618133 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dad383e-4d7c-485c-8a28-cf87a91f3370-operator-scripts\") pod \"cinder-fedf-account-create-update-qqj9j\" (UID: \"9dad383e-4d7c-485c-8a28-cf87a91f3370\") " pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.618157 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/012c4748-24f7-48bd-983a-481f3f544724-operator-scripts\") pod \"cinder-db-create-pvfrj\" (UID: \"012c4748-24f7-48bd-983a-481f3f544724\") " pod="openstack/cinder-db-create-pvfrj" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.618283 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-operator-scripts\") pod \"barbican-db-create-6fbps\" (UID: \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\") " pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.619486 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dad383e-4d7c-485c-8a28-cf87a91f3370-operator-scripts\") pod \"cinder-fedf-account-create-update-qqj9j\" (UID: \"9dad383e-4d7c-485c-8a28-cf87a91f3370\") " pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.620069 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/012c4748-24f7-48bd-983a-481f3f544724-operator-scripts\") pod \"cinder-db-create-pvfrj\" (UID: 
\"012c4748-24f7-48bd-983a-481f3f544724\") " pod="openstack/cinder-db-create-pvfrj" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.652058 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4xh2\" (UniqueName: \"kubernetes.io/projected/9dad383e-4d7c-485c-8a28-cf87a91f3370-kube-api-access-s4xh2\") pod \"cinder-fedf-account-create-update-qqj9j\" (UID: \"9dad383e-4d7c-485c-8a28-cf87a91f3370\") " pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.654109 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwbkf\" (UniqueName: \"kubernetes.io/projected/012c4748-24f7-48bd-983a-481f3f544724-kube-api-access-hwbkf\") pod \"cinder-db-create-pvfrj\" (UID: \"012c4748-24f7-48bd-983a-481f3f544724\") " pod="openstack/cinder-db-create-pvfrj" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.663295 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-zlc9h"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.664433 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.666806 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.667026 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.667137 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.667285 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k2tqv" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.684025 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-zlc9h"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.690925 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-pvfrj" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.704021 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-70cd-account-create-update-2xcb2"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.705970 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.708073 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.708304 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.720300 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af36f69e-989c-40df-b146-df2168789b88-operator-scripts\") pod \"barbican-04f3-account-create-update-6ckbf\" (UID: \"af36f69e-989c-40df-b146-df2168789b88\") " pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.720336 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hspx\" (UniqueName: \"kubernetes.io/projected/af36f69e-989c-40df-b146-df2168789b88-kube-api-access-8hspx\") pod \"barbican-04f3-account-create-update-6ckbf\" (UID: \"af36f69e-989c-40df-b146-df2168789b88\") " pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.720375 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5spcb\" (UniqueName: \"kubernetes.io/projected/ea682f38-6eee-4ea1-beea-bcd14edc880e-kube-api-access-5spcb\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.720426 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-operator-scripts\") pod \"barbican-db-create-6fbps\" (UID: \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\") " pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.720493 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dv44l\" (UniqueName: \"kubernetes.io/projected/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-kube-api-access-dv44l\") pod \"barbican-db-create-6fbps\" (UID: \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\") " pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.720512 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-config-data\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.720573 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-combined-ca-bundle\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.721227 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-operator-scripts\") pod \"barbican-db-create-6fbps\" (UID: \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\") " pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.738106 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-70cd-account-create-update-2xcb2"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 
13:36:30.757765 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dv44l\" (UniqueName: \"kubernetes.io/projected/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-kube-api-access-dv44l\") pod \"barbican-db-create-6fbps\" (UID: \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\") " pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.797062 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.799920 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-vtzh4"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.800888 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.822948 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af36f69e-989c-40df-b146-df2168789b88-operator-scripts\") pod \"barbican-04f3-account-create-update-6ckbf\" (UID: \"af36f69e-989c-40df-b146-df2168789b88\") " pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.822986 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hspx\" (UniqueName: \"kubernetes.io/projected/af36f69e-989c-40df-b146-df2168789b88-kube-api-access-8hspx\") pod \"barbican-04f3-account-create-update-6ckbf\" (UID: \"af36f69e-989c-40df-b146-df2168789b88\") " pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.823013 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af317c31-0e4f-4ddb-8044-a9ce9965f264-operator-scripts\") pod \"neutron-70cd-account-create-update-2xcb2\" (UID: \"af317c31-0e4f-4ddb-8044-a9ce9965f264\") " pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.823042 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5spcb\" (UniqueName: \"kubernetes.io/projected/ea682f38-6eee-4ea1-beea-bcd14edc880e-kube-api-access-5spcb\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.823066 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lhsb\" (UniqueName: \"kubernetes.io/projected/af317c31-0e4f-4ddb-8044-a9ce9965f264-kube-api-access-5lhsb\") pod \"neutron-70cd-account-create-update-2xcb2\" (UID: \"af317c31-0e4f-4ddb-8044-a9ce9965f264\") " pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.823121 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-config-data\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.823147 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-combined-ca-bundle\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.825290 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af36f69e-989c-40df-b146-df2168789b88-operator-scripts\") pod \"barbican-04f3-account-create-update-6ckbf\" (UID: \"af36f69e-989c-40df-b146-df2168789b88\") " pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.825646 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-vtzh4"] Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.826675 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-combined-ca-bundle\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.828128 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-config-data\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.858843 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hspx\" (UniqueName: \"kubernetes.io/projected/af36f69e-989c-40df-b146-df2168789b88-kube-api-access-8hspx\") pod \"barbican-04f3-account-create-update-6ckbf\" (UID: \"af36f69e-989c-40df-b146-df2168789b88\") " pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.864807 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5spcb\" (UniqueName: \"kubernetes.io/projected/ea682f38-6eee-4ea1-beea-bcd14edc880e-kube-api-access-5spcb\") pod \"keystone-db-sync-zlc9h\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") " pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.907862 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.924356 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsdzj\" (UniqueName: \"kubernetes.io/projected/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-kube-api-access-wsdzj\") pod \"neutron-db-create-vtzh4\" (UID: \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\") " pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.924426 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-operator-scripts\") pod \"neutron-db-create-vtzh4\" (UID: \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\") " pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.924524 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af317c31-0e4f-4ddb-8044-a9ce9965f264-operator-scripts\") pod \"neutron-70cd-account-create-update-2xcb2\" (UID: \"af317c31-0e4f-4ddb-8044-a9ce9965f264\") " pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.924568 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lhsb\" (UniqueName: \"kubernetes.io/projected/af317c31-0e4f-4ddb-8044-a9ce9965f264-kube-api-access-5lhsb\") pod \"neutron-70cd-account-create-update-2xcb2\" (UID: \"af317c31-0e4f-4ddb-8044-a9ce9965f264\") " pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.926749 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af317c31-0e4f-4ddb-8044-a9ce9965f264-operator-scripts\") pod \"neutron-70cd-account-create-update-2xcb2\" (UID: \"af317c31-0e4f-4ddb-8044-a9ce9965f264\") " pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:30 crc kubenswrapper[4787]: I0129 13:36:30.949440 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lhsb\" (UniqueName: \"kubernetes.io/projected/af317c31-0e4f-4ddb-8044-a9ce9965f264-kube-api-access-5lhsb\") pod \"neutron-70cd-account-create-update-2xcb2\" (UID: \"af317c31-0e4f-4ddb-8044-a9ce9965f264\") " pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.026593 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-operator-scripts\") pod \"neutron-db-create-vtzh4\" (UID: \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\") " pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.026820 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsdzj\" (UniqueName: \"kubernetes.io/projected/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-kube-api-access-wsdzj\") pod \"neutron-db-create-vtzh4\" (UID: \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\") " pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.029565 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-operator-scripts\") pod \"neutron-db-create-vtzh4\" (UID: \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\") " pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.049071 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsdzj\" (UniqueName: \"kubernetes.io/projected/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-kube-api-access-wsdzj\") pod \"neutron-db-create-vtzh4\" (UID: \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\") " pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:31 crc kubenswrapper[4787]: E0129 13:36:31.122729 4787 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbc05e24_bbf4_44e2_9cd3_40c095f56aea.slice/crio-ba5db01e0916c4c88d6a2536d7cef2d0569d87e7483b40333d2ad311475774dd.scope\": RecentStats: unable to find data in memory cache]" Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.155403 4787 generic.go:334] "Generic (PLEG): container finished" podID="cbc05e24-bbf4-44e2-9cd3-40c095f56aea" containerID="ba5db01e0916c4c88d6a2536d7cef2d0569d87e7483b40333d2ad311475774dd" exitCode=0 Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.155631 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-l5s5r" event={"ID":"cbc05e24-bbf4-44e2-9cd3-40c095f56aea","Type":"ContainerDied","Data":"ba5db01e0916c4c88d6a2536d7cef2d0569d87e7483b40333d2ad311475774dd"} Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.164792 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.196857 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.217832 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.435645 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fedf-account-create-update-qqj9j"] Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.445979 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-pvfrj"] Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.610084 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-6fbps"] Jan 29 13:36:31 crc kubenswrapper[4787]: W0129 13:36:31.637919 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59009fab_52fa_4e28_a87e_2fa7a49d9f7d.slice/crio-7ad6c4e80169f3b4cb245297b3828133dc9c7c91a9c3948800ecb5e33ac3e659 WatchSource:0}: Error finding container 7ad6c4e80169f3b4cb245297b3828133dc9c7c91a9c3948800ecb5e33ac3e659: Status 404 returned error can't find the container with id 7ad6c4e80169f3b4cb245297b3828133dc9c7c91a9c3948800ecb5e33ac3e659 Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.823894 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-zlc9h"] Jan 29 13:36:31 crc kubenswrapper[4787]: I0129 13:36:31.849477 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-70cd-account-create-update-2xcb2"] Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.008891 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-04f3-account-create-update-6ckbf"] Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.015915 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-vtzh4"] Jan 29 13:36:32 crc kubenswrapper[4787]: W0129 13:36:32.073387 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a5645bd_0519_4e87_850a_7cd4c72bd0c1.slice/crio-e2bd05d675013be149cd6661e95a850fe31d7a4c3d0c4d48165e74ec5ae91e6c WatchSource:0}: Error finding container e2bd05d675013be149cd6661e95a850fe31d7a4c3d0c4d48165e74ec5ae91e6c: Status 404 returned error can't find the container with id e2bd05d675013be149cd6661e95a850fe31d7a4c3d0c4d48165e74ec5ae91e6c Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.175985 4787 generic.go:334] "Generic (PLEG): container finished" podID="9dad383e-4d7c-485c-8a28-cf87a91f3370" containerID="23f1b5e0219271a3b5ec4767dfc35d3f262582364b3cf8063f3e6a39fa559f84" exitCode=0 Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.176244 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fedf-account-create-update-qqj9j" event={"ID":"9dad383e-4d7c-485c-8a28-cf87a91f3370","Type":"ContainerDied","Data":"23f1b5e0219271a3b5ec4767dfc35d3f262582364b3cf8063f3e6a39fa559f84"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.176268 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fedf-account-create-update-qqj9j" event={"ID":"9dad383e-4d7c-485c-8a28-cf87a91f3370","Type":"ContainerStarted","Data":"a1df5c92317fd9c781be726f32010ad63a2717ad87ad0b43500f2aa3836b3184"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.181172 4787 generic.go:334] "Generic (PLEG): container finished" podID="fdeb5cf3-4f9a-4f9d-8559-648f5079397a" containerID="72b2ec0c4cd5c043ceebdfa345faa2e0f31d571e8b19d5ab485f4d1a72bf4bc4" exitCode=0 Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.181219 4787 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9zlwc" event={"ID":"fdeb5cf3-4f9a-4f9d-8559-648f5079397a","Type":"ContainerDied","Data":"72b2ec0c4cd5c043ceebdfa345faa2e0f31d571e8b19d5ab485f4d1a72bf4bc4"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.182371 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-04f3-account-create-update-6ckbf" event={"ID":"af36f69e-989c-40df-b146-df2168789b88","Type":"ContainerStarted","Data":"d4f5002babe9a7552477c186ebe9838f2d9bc475f44046beba2ec2355b52dca7"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.184105 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-vtzh4" event={"ID":"0a5645bd-0519-4e87-850a-7cd4c72bd0c1","Type":"ContainerStarted","Data":"e2bd05d675013be149cd6661e95a850fe31d7a4c3d0c4d48165e74ec5ae91e6c"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.185370 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-70cd-account-create-update-2xcb2" event={"ID":"af317c31-0e4f-4ddb-8044-a9ce9965f264","Type":"ContainerStarted","Data":"3037de86958a058bff127ccea5ba7107871ceffc84b77ab0c6edad39caa969c6"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.185390 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-70cd-account-create-update-2xcb2" event={"ID":"af317c31-0e4f-4ddb-8044-a9ce9965f264","Type":"ContainerStarted","Data":"979bd106b3e058b20c9e7ccca562d86aa7160b3a0a974b99856d0c473c410303"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.188485 4787 generic.go:334] "Generic (PLEG): container finished" podID="012c4748-24f7-48bd-983a-481f3f544724" containerID="c85f5b8912965f59076414fe6d1f7dcf416f22854bbc47dfb3585b9813f486d6" exitCode=0 Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.188525 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-pvfrj" event={"ID":"012c4748-24f7-48bd-983a-481f3f544724","Type":"ContainerDied","Data":"c85f5b8912965f59076414fe6d1f7dcf416f22854bbc47dfb3585b9813f486d6"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.188540 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-pvfrj" event={"ID":"012c4748-24f7-48bd-983a-481f3f544724","Type":"ContainerStarted","Data":"f9d909013ee1063852e1cf7c4c01dea4170d348d18241b0bc1ec30cf46fa6e50"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.195779 4787 generic.go:334] "Generic (PLEG): container finished" podID="59009fab-52fa-4e28-a87e-2fa7a49d9f7d" containerID="8f10ec30349c15378a4bec9d811f8d1bc66a9162353f7017ea7de25fe5af7928" exitCode=0 Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.195953 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-6fbps" event={"ID":"59009fab-52fa-4e28-a87e-2fa7a49d9f7d","Type":"ContainerDied","Data":"8f10ec30349c15378a4bec9d811f8d1bc66a9162353f7017ea7de25fe5af7928"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.195976 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-6fbps" event={"ID":"59009fab-52fa-4e28-a87e-2fa7a49d9f7d","Type":"ContainerStarted","Data":"7ad6c4e80169f3b4cb245297b3828133dc9c7c91a9c3948800ecb5e33ac3e659"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.197895 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-zlc9h" 
event={"ID":"ea682f38-6eee-4ea1-beea-bcd14edc880e","Type":"ContainerStarted","Data":"b7c4ae0a16ad14940d2fc60f328c511d5a52ed5232dbf1ca005694fec0b3b214"} Jan 29 13:36:32 crc kubenswrapper[4787]: I0129 13:36:32.228424 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-70cd-account-create-update-2xcb2" podStartSLOduration=2.228410076 podStartE2EDuration="2.228410076s" podCreationTimestamp="2026-01-29 13:36:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:32.225917059 +0000 UTC m=+1230.987177335" watchObservedRunningTime="2026-01-29 13:36:32.228410076 +0000 UTC m=+1230.989670352" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.797030 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.889171 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-db-sync-config-data\") pod \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.889529 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-combined-ca-bundle\") pod \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.889634 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-config-data\") pod \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.889709 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6sxm\" (UniqueName: \"kubernetes.io/projected/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-kube-api-access-h6sxm\") pod \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\" (UID: \"cbc05e24-bbf4-44e2-9cd3-40c095f56aea\") " Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.904828 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "cbc05e24-bbf4-44e2-9cd3-40c095f56aea" (UID: "cbc05e24-bbf4-44e2-9cd3-40c095f56aea"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.906687 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-kube-api-access-h6sxm" (OuterVolumeSpecName: "kube-api-access-h6sxm") pod "cbc05e24-bbf4-44e2-9cd3-40c095f56aea" (UID: "cbc05e24-bbf4-44e2-9cd3-40c095f56aea"). InnerVolumeSpecName "kube-api-access-h6sxm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.921897 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cbc05e24-bbf4-44e2-9cd3-40c095f56aea" (UID: "cbc05e24-bbf4-44e2-9cd3-40c095f56aea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.961579 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-config-data" (OuterVolumeSpecName: "config-data") pod "cbc05e24-bbf4-44e2-9cd3-40c095f56aea" (UID: "cbc05e24-bbf4-44e2-9cd3-40c095f56aea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.991377 4787 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.991411 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.991420 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:32.991429 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6sxm\" (UniqueName: \"kubernetes.io/projected/cbc05e24-bbf4-44e2-9cd3-40c095f56aea-kube-api-access-h6sxm\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.208361 4787 generic.go:334] "Generic (PLEG): container finished" podID="af317c31-0e4f-4ddb-8044-a9ce9965f264" containerID="3037de86958a058bff127ccea5ba7107871ceffc84b77ab0c6edad39caa969c6" exitCode=0 Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.208584 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-70cd-account-create-update-2xcb2" event={"ID":"af317c31-0e4f-4ddb-8044-a9ce9965f264","Type":"ContainerDied","Data":"3037de86958a058bff127ccea5ba7107871ceffc84b77ab0c6edad39caa969c6"} Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.210381 4787 generic.go:334] "Generic (PLEG): container finished" podID="af36f69e-989c-40df-b146-df2168789b88" containerID="724e336ca6bd3ec15b6bd66148b9cbec2928b11b984c9a6bb592e88e19b45827" exitCode=0 Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.210471 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-04f3-account-create-update-6ckbf" event={"ID":"af36f69e-989c-40df-b146-df2168789b88","Type":"ContainerDied","Data":"724e336ca6bd3ec15b6bd66148b9cbec2928b11b984c9a6bb592e88e19b45827"} Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.212466 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-l5s5r" event={"ID":"cbc05e24-bbf4-44e2-9cd3-40c095f56aea","Type":"ContainerDied","Data":"bfaf393cc3cde2125c9cd0a3644e348cffbdc7df892e427eb2ebe744770a10aa"} Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.212503 4787 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfaf393cc3cde2125c9cd0a3644e348cffbdc7df892e427eb2ebe744770a10aa" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.212561 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-l5s5r" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.221859 4787 generic.go:334] "Generic (PLEG): container finished" podID="0a5645bd-0519-4e87-850a-7cd4c72bd0c1" containerID="9a3e281e4dd5be1e9e0bcf9ef19644f02ecd2eb343bc9fdd17548cccefe639cd" exitCode=0 Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.222106 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-vtzh4" event={"ID":"0a5645bd-0519-4e87-850a-7cd4c72bd0c1","Type":"ContainerDied","Data":"9a3e281e4dd5be1e9e0bcf9ef19644f02ecd2eb343bc9fdd17548cccefe639cd"} Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.567317 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-tln9h"] Jan 29 13:36:33 crc kubenswrapper[4787]: E0129 13:36:33.569970 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbc05e24-bbf4-44e2-9cd3-40c095f56aea" containerName="glance-db-sync" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.569988 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbc05e24-bbf4-44e2-9cd3-40c095f56aea" containerName="glance-db-sync" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.570140 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbc05e24-bbf4-44e2-9cd3-40c095f56aea" containerName="glance-db-sync" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.570951 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.591220 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-tln9h"] Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.607200 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-dns-svc\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.607258 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-nb\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.607319 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-sb\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.607341 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4xdj\" (UniqueName: \"kubernetes.io/projected/0d533171-7cfb-4ab5-9fca-d294ee78d912-kube-api-access-f4xdj\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: 
\"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.607400 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-config\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.712350 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-config\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.712443 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-dns-svc\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.712485 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-nb\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.712531 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-sb\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.712547 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4xdj\" (UniqueName: \"kubernetes.io/projected/0d533171-7cfb-4ab5-9fca-d294ee78d912-kube-api-access-f4xdj\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.713714 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-config\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.714211 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-dns-svc\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.714707 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-nb\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc 
kubenswrapper[4787]: I0129 13:36:33.715173 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-sb\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.736680 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fedf-account-create-update-qqj9j" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.743435 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4xdj\" (UniqueName: \"kubernetes.io/projected/0d533171-7cfb-4ab5-9fca-d294ee78d912-kube-api-access-f4xdj\") pod \"dnsmasq-dns-6bfd654465-tln9h\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.822008 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4xh2\" (UniqueName: \"kubernetes.io/projected/9dad383e-4d7c-485c-8a28-cf87a91f3370-kube-api-access-s4xh2\") pod \"9dad383e-4d7c-485c-8a28-cf87a91f3370\" (UID: \"9dad383e-4d7c-485c-8a28-cf87a91f3370\") " Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.822125 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dad383e-4d7c-485c-8a28-cf87a91f3370-operator-scripts\") pod \"9dad383e-4d7c-485c-8a28-cf87a91f3370\" (UID: \"9dad383e-4d7c-485c-8a28-cf87a91f3370\") " Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.823331 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dad383e-4d7c-485c-8a28-cf87a91f3370-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9dad383e-4d7c-485c-8a28-cf87a91f3370" (UID: "9dad383e-4d7c-485c-8a28-cf87a91f3370"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.857600 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dad383e-4d7c-485c-8a28-cf87a91f3370-kube-api-access-s4xh2" (OuterVolumeSpecName: "kube-api-access-s4xh2") pod "9dad383e-4d7c-485c-8a28-cf87a91f3370" (UID: "9dad383e-4d7c-485c-8a28-cf87a91f3370"). InnerVolumeSpecName "kube-api-access-s4xh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.898186 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.924168 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dad383e-4d7c-485c-8a28-cf87a91f3370-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:33 crc kubenswrapper[4787]: I0129 13:36:33.924484 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4xh2\" (UniqueName: \"kubernetes.io/projected/9dad383e-4d7c-485c-8a28-cf87a91f3370-kube-api-access-s4xh2\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.036948 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-6fbps" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.037491 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9zlwc" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.044713 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-pvfrj" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131160 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-ring-data-devices\") pod \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131196 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dv44l\" (UniqueName: \"kubernetes.io/projected/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-kube-api-access-dv44l\") pod \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\" (UID: \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131295 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-combined-ca-bundle\") pod \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131334 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-swiftconf\") pod \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131368 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwbkf\" (UniqueName: \"kubernetes.io/projected/012c4748-24f7-48bd-983a-481f3f544724-kube-api-access-hwbkf\") pod \"012c4748-24f7-48bd-983a-481f3f544724\" (UID: \"012c4748-24f7-48bd-983a-481f3f544724\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131392 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-dispersionconf\") pod \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131421 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-operator-scripts\") pod \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\" (UID: \"59009fab-52fa-4e28-a87e-2fa7a49d9f7d\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131437 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-scripts\") pod \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131523 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g76g7\" (UniqueName: 
\"kubernetes.io/projected/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-kube-api-access-g76g7\") pod \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131574 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/012c4748-24f7-48bd-983a-481f3f544724-operator-scripts\") pod \"012c4748-24f7-48bd-983a-481f3f544724\" (UID: \"012c4748-24f7-48bd-983a-481f3f544724\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.131627 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-etc-swift\") pod \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\" (UID: \"fdeb5cf3-4f9a-4f9d-8559-648f5079397a\") " Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.132078 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "fdeb5cf3-4f9a-4f9d-8559-648f5079397a" (UID: "fdeb5cf3-4f9a-4f9d-8559-648f5079397a"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.132709 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "fdeb5cf3-4f9a-4f9d-8559-648f5079397a" (UID: "fdeb5cf3-4f9a-4f9d-8559-648f5079397a"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.137842 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-kube-api-access-g76g7" (OuterVolumeSpecName: "kube-api-access-g76g7") pod "fdeb5cf3-4f9a-4f9d-8559-648f5079397a" (UID: "fdeb5cf3-4f9a-4f9d-8559-648f5079397a"). InnerVolumeSpecName "kube-api-access-g76g7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.139796 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/012c4748-24f7-48bd-983a-481f3f544724-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "012c4748-24f7-48bd-983a-481f3f544724" (UID: "012c4748-24f7-48bd-983a-481f3f544724"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.143977 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "59009fab-52fa-4e28-a87e-2fa7a49d9f7d" (UID: "59009fab-52fa-4e28-a87e-2fa7a49d9f7d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.145171 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "fdeb5cf3-4f9a-4f9d-8559-648f5079397a" (UID: "fdeb5cf3-4f9a-4f9d-8559-648f5079397a"). InnerVolumeSpecName "dispersionconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.147520 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/012c4748-24f7-48bd-983a-481f3f544724-kube-api-access-hwbkf" (OuterVolumeSpecName: "kube-api-access-hwbkf") pod "012c4748-24f7-48bd-983a-481f3f544724" (UID: "012c4748-24f7-48bd-983a-481f3f544724"). InnerVolumeSpecName "kube-api-access-hwbkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.147706 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-kube-api-access-dv44l" (OuterVolumeSpecName: "kube-api-access-dv44l") pod "59009fab-52fa-4e28-a87e-2fa7a49d9f7d" (UID: "59009fab-52fa-4e28-a87e-2fa7a49d9f7d"). InnerVolumeSpecName "kube-api-access-dv44l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.166788 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fdeb5cf3-4f9a-4f9d-8559-648f5079397a" (UID: "fdeb5cf3-4f9a-4f9d-8559-648f5079397a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.184422 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-scripts" (OuterVolumeSpecName: "scripts") pod "fdeb5cf3-4f9a-4f9d-8559-648f5079397a" (UID: "fdeb5cf3-4f9a-4f9d-8559-648f5079397a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.193845 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "fdeb5cf3-4f9a-4f9d-8559-648f5079397a" (UID: "fdeb5cf3-4f9a-4f9d-8559-648f5079397a"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233777 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/012c4748-24f7-48bd-983a-481f3f544724-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233807 4787 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233816 4787 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233824 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dv44l\" (UniqueName: \"kubernetes.io/projected/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-kube-api-access-dv44l\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233833 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233842 4787 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233862 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwbkf\" (UniqueName: \"kubernetes.io/projected/012c4748-24f7-48bd-983a-481f3f544724-kube-api-access-hwbkf\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233870 4787 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233879 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/59009fab-52fa-4e28-a87e-2fa7a49d9f7d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233886 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.233895 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g76g7\" (UniqueName: \"kubernetes.io/projected/fdeb5cf3-4f9a-4f9d-8559-648f5079397a-kube-api-access-g76g7\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.235322 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-pvfrj"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.235753 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-pvfrj" event={"ID":"012c4748-24f7-48bd-983a-481f3f544724","Type":"ContainerDied","Data":"f9d909013ee1063852e1cf7c4c01dea4170d348d18241b0bc1ec30cf46fa6e50"}
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.235827 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9d909013ee1063852e1cf7c4c01dea4170d348d18241b0bc1ec30cf46fa6e50"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.246893 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-6fbps"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.246983 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-6fbps" event={"ID":"59009fab-52fa-4e28-a87e-2fa7a49d9f7d","Type":"ContainerDied","Data":"7ad6c4e80169f3b4cb245297b3828133dc9c7c91a9c3948800ecb5e33ac3e659"}
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.247017 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ad6c4e80169f3b4cb245297b3828133dc9c7c91a9c3948800ecb5e33ac3e659"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.249040 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fedf-account-create-update-qqj9j" event={"ID":"9dad383e-4d7c-485c-8a28-cf87a91f3370","Type":"ContainerDied","Data":"a1df5c92317fd9c781be726f32010ad63a2717ad87ad0b43500f2aa3836b3184"}
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.249078 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1df5c92317fd9c781be726f32010ad63a2717ad87ad0b43500f2aa3836b3184"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.249130 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fedf-account-create-update-qqj9j"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.255073 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9zlwc"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.255998 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9zlwc" event={"ID":"fdeb5cf3-4f9a-4f9d-8559-648f5079397a","Type":"ContainerDied","Data":"7f04ee4e264f861002ac1f7c347a1d1a8178f07ebf20cc23b11d4cbf67c6085f"}
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.256025 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f04ee4e264f861002ac1f7c347a1d1a8178f07ebf20cc23b11d4cbf67c6085f"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.515922 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-tln9h"]
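The ContainerStarted/ContainerDied edges in the PLEG events above can be paired by container ID to see how long each short-lived job container was observed to run. A minimal sketch, assuming plain journald lines in the format shown here; the regex and the container_lifetimes helper are hypothetical analysis code, not part of the kubelet:

import re
from datetime import datetime

# Hypothetical log-analysis helper: pair each container's
# ContainerStarted/ContainerDied PLEG events by container ID and
# report how long the container was observed between the two edges.
PLEG = re.compile(
    r'I\d{4} (?P<ts>\d{2}:\d{2}:\d{2}\.\d+) .*?kubelet\.go:\d+\] '
    r'"SyncLoop \(PLEG\): event for pod" pod="(?P<pod>[^"]+)" '
    r'event={"ID":"[^"]+","Type":"(?P<type>\w+)","Data":"(?P<cid>[^"]+)"}'
)

def container_lifetimes(log_text):
    started, spans = {}, []
    for m in PLEG.finditer(log_text):
        t = datetime.strptime(m["ts"], "%H:%M:%S.%f")
        if m["type"] == "ContainerStarted":
            started[m["cid"]] = (m["pod"], t)
        elif m["type"] == "ContainerDied" and m["cid"] in started:
            pod, t0 = started.pop(m["cid"])
            spans.append((pod, m["cid"][:12], (t - t0).total_seconds()))
    return spans

Run over the entries above this yields roughly second-scale lifetimes for the db-create and account-create job containers, e.g. 3037de86... is started at 13:36:32.185370 and dies at 13:36:33.208584, about a second later. Note these are PLEG observation timestamps, not exact container runtimes.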
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.647743 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-vtzh4"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.768093 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsdzj\" (UniqueName: \"kubernetes.io/projected/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-kube-api-access-wsdzj\") pod \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\" (UID: \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\") "
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.768154 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-operator-scripts\") pod \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\" (UID: \"0a5645bd-0519-4e87-850a-7cd4c72bd0c1\") "
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.769166 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0a5645bd-0519-4e87-850a-7cd4c72bd0c1" (UID: "0a5645bd-0519-4e87-850a-7cd4c72bd0c1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.779763 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-kube-api-access-wsdzj" (OuterVolumeSpecName: "kube-api-access-wsdzj") pod "0a5645bd-0519-4e87-850a-7cd4c72bd0c1" (UID: "0a5645bd-0519-4e87-850a-7cd4c72bd0c1"). InnerVolumeSpecName "kube-api-access-wsdzj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.869666 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsdzj\" (UniqueName: \"kubernetes.io/projected/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-kube-api-access-wsdzj\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.869706 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a5645bd-0519-4e87-850a-7cd4c72bd0c1-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.896152 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-70cd-account-create-update-2xcb2"
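Each teardown above follows the same three-step pattern: the reconciler logs "UnmountVolume started", the operation generator reports "UnmountVolume.TearDown succeeded", and a final "Volume detached" confirms the volume is gone from the node. One quick way to spot volumes stuck mid-teardown is to diff the started and detached sets by UniqueName. A sketch under the same journald-format assumption as before; stuck_unmounts is a hypothetical helper, not kubelet code:

import re

# The kubelet escapes quotes inside structured messages, so the raw
# journald text contains literal \" sequences; capture the UniqueName
# between them.
STARTED  = re.compile(r'UnmountVolume started for volume .*?UniqueName: \\"([^\\]+)\\"')
DETACHED = re.compile(r'Volume detached for volume .*?UniqueName: \\"([^\\]+)\\"')

def stuck_unmounts(log_text):
    """UniqueNames with an unmount started but no 'Volume detached' record."""
    started  = {m[1] for m in STARTED.finditer(log_text)}
    detached = {m[1] for m in DETACHED.finditer(log_text)}
    return sorted(started - detached)

For the neutron-db-create-vtzh4 pod above the difference is empty: both of its volumes pass through all three steps within roughly 100 ms (13:36:34.768 to 13:36:34.870).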
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.918432 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-04f3-account-create-update-6ckbf"
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.970795 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af317c31-0e4f-4ddb-8044-a9ce9965f264-operator-scripts\") pod \"af317c31-0e4f-4ddb-8044-a9ce9965f264\" (UID: \"af317c31-0e4f-4ddb-8044-a9ce9965f264\") "
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.970897 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lhsb\" (UniqueName: \"kubernetes.io/projected/af317c31-0e4f-4ddb-8044-a9ce9965f264-kube-api-access-5lhsb\") pod \"af317c31-0e4f-4ddb-8044-a9ce9965f264\" (UID: \"af317c31-0e4f-4ddb-8044-a9ce9965f264\") "
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.971432 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af317c31-0e4f-4ddb-8044-a9ce9965f264-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "af317c31-0e4f-4ddb-8044-a9ce9965f264" (UID: "af317c31-0e4f-4ddb-8044-a9ce9965f264"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:36:34 crc kubenswrapper[4787]: I0129 13:36:34.978781 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af317c31-0e4f-4ddb-8044-a9ce9965f264-kube-api-access-5lhsb" (OuterVolumeSpecName: "kube-api-access-5lhsb") pod "af317c31-0e4f-4ddb-8044-a9ce9965f264" (UID: "af317c31-0e4f-4ddb-8044-a9ce9965f264"). InnerVolumeSpecName "kube-api-access-5lhsb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.079885 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af36f69e-989c-40df-b146-df2168789b88-operator-scripts\") pod \"af36f69e-989c-40df-b146-df2168789b88\" (UID: \"af36f69e-989c-40df-b146-df2168789b88\") "
Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.080006 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hspx\" (UniqueName: \"kubernetes.io/projected/af36f69e-989c-40df-b146-df2168789b88-kube-api-access-8hspx\") pod \"af36f69e-989c-40df-b146-df2168789b88\" (UID: \"af36f69e-989c-40df-b146-df2168789b88\") "
Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.080361 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af36f69e-989c-40df-b146-df2168789b88-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "af36f69e-989c-40df-b146-df2168789b88" (UID: "af36f69e-989c-40df-b146-df2168789b88"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.081440 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lhsb\" (UniqueName: \"kubernetes.io/projected/af317c31-0e4f-4ddb-8044-a9ce9965f264-kube-api-access-5lhsb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.081544 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af317c31-0e4f-4ddb-8044-a9ce9965f264-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.081560 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af36f69e-989c-40df-b146-df2168789b88-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.084981 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af36f69e-989c-40df-b146-df2168789b88-kube-api-access-8hspx" (OuterVolumeSpecName: "kube-api-access-8hspx") pod "af36f69e-989c-40df-b146-df2168789b88" (UID: "af36f69e-989c-40df-b146-df2168789b88"). InnerVolumeSpecName "kube-api-access-8hspx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.183633 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hspx\" (UniqueName: \"kubernetes.io/projected/af36f69e-989c-40df-b146-df2168789b88-kube-api-access-8hspx\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.269542 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-04f3-account-create-update-6ckbf" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.270327 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-04f3-account-create-update-6ckbf" event={"ID":"af36f69e-989c-40df-b146-df2168789b88","Type":"ContainerDied","Data":"d4f5002babe9a7552477c186ebe9838f2d9bc475f44046beba2ec2355b52dca7"} Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.270416 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4f5002babe9a7552477c186ebe9838f2d9bc475f44046beba2ec2355b52dca7" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.274658 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-vtzh4" event={"ID":"0a5645bd-0519-4e87-850a-7cd4c72bd0c1","Type":"ContainerDied","Data":"e2bd05d675013be149cd6661e95a850fe31d7a4c3d0c4d48165e74ec5ae91e6c"} Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.274717 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2bd05d675013be149cd6661e95a850fe31d7a4c3d0c4d48165e74ec5ae91e6c" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.274815 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-vtzh4" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.298740 4787 generic.go:334] "Generic (PLEG): container finished" podID="0d533171-7cfb-4ab5-9fca-d294ee78d912" containerID="9f6b8c6119bbd9dfde36a5c6f71c3c39ab9475d0f24d64b2fc31e63d3b138c09" exitCode=0 Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.298839 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" event={"ID":"0d533171-7cfb-4ab5-9fca-d294ee78d912","Type":"ContainerDied","Data":"9f6b8c6119bbd9dfde36a5c6f71c3c39ab9475d0f24d64b2fc31e63d3b138c09"} Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.298883 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" event={"ID":"0d533171-7cfb-4ab5-9fca-d294ee78d912","Type":"ContainerStarted","Data":"6deec4370807100a320ad726300ce380e0353ef908329c292cd83a9ebbb3779d"} Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.307007 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-70cd-account-create-update-2xcb2" event={"ID":"af317c31-0e4f-4ddb-8044-a9ce9965f264","Type":"ContainerDied","Data":"979bd106b3e058b20c9e7ccca562d86aa7160b3a0a974b99856d0c473c410303"} Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.307051 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="979bd106b3e058b20c9e7ccca562d86aa7160b3a0a974b99856d0c473c410303" Jan 29 13:36:35 crc kubenswrapper[4787]: I0129 13:36:35.307080 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-70cd-account-create-update-2xcb2" Jan 29 13:36:36 crc kubenswrapper[4787]: I0129 13:36:36.317533 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" event={"ID":"0d533171-7cfb-4ab5-9fca-d294ee78d912","Type":"ContainerStarted","Data":"fed86be861d29e307074c02d4e5f6a4a5e8ebf89bc7bc4d49f92042fa6e9ec31"} Jan 29 13:36:36 crc kubenswrapper[4787]: I0129 13:36:36.317975 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:36:36 crc kubenswrapper[4787]: I0129 13:36:36.352694 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" podStartSLOduration=3.352671825 podStartE2EDuration="3.352671825s" podCreationTimestamp="2026-01-29 13:36:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:36.346347064 +0000 UTC m=+1235.107607350" watchObservedRunningTime="2026-01-29 13:36:36.352671825 +0000 UTC m=+1235.113932101" Jan 29 13:36:38 crc kubenswrapper[4787]: I0129 13:36:38.032049 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:38 crc kubenswrapper[4787]: I0129 13:36:38.053989 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"swift-storage-0\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " pod="openstack/swift-storage-0" Jan 29 13:36:38 crc kubenswrapper[4787]: I0129 13:36:38.171336 4787 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/swift-storage-0"
Jan 29 13:36:38 crc kubenswrapper[4787]: I0129 13:36:38.905690 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Jan 29 13:36:39 crc kubenswrapper[4787]: I0129 13:36:39.344413 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-zlc9h" event={"ID":"ea682f38-6eee-4ea1-beea-bcd14edc880e","Type":"ContainerStarted","Data":"725d17891c6e69acd5172d8d256ef2ce608f642076839459bd1443935ee559be"}
Jan 29 13:36:39 crc kubenswrapper[4787]: I0129 13:36:39.345739 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"ed319c2cfaa1af9786cd58e89b63bdb61e854fda4b5e20a508f0011e00d59fd5"}
Jan 29 13:36:39 crc kubenswrapper[4787]: I0129 13:36:39.364782 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-zlc9h" podStartSLOduration=2.766957153 podStartE2EDuration="9.364766658s" podCreationTimestamp="2026-01-29 13:36:30 +0000 UTC" firstStartedPulling="2026-01-29 13:36:31.835304084 +0000 UTC m=+1230.596564360" lastFinishedPulling="2026-01-29 13:36:38.433113589 +0000 UTC m=+1237.194373865" observedRunningTime="2026-01-29 13:36:39.360347852 +0000 UTC m=+1238.121608158" watchObservedRunningTime="2026-01-29 13:36:39.364766658 +0000 UTC m=+1238.126026934"
Jan 29 13:36:40 crc kubenswrapper[4787]: I0129 13:36:40.355848 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e"}
Jan 29 13:36:41 crc kubenswrapper[4787]: I0129 13:36:41.366005 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0"}
Jan 29 13:36:41 crc kubenswrapper[4787]: I0129 13:36:41.366527 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be"}
Jan 29 13:36:41 crc kubenswrapper[4787]: I0129 13:36:41.366541 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4"}
Jan 29 13:36:42 crc kubenswrapper[4787]: I0129 13:36:42.394014 4787 generic.go:334] "Generic (PLEG): container finished" podID="ea682f38-6eee-4ea1-beea-bcd14edc880e" containerID="725d17891c6e69acd5172d8d256ef2ce608f642076839459bd1443935ee559be" exitCode=0
Jan 29 13:36:42 crc kubenswrapper[4787]: I0129 13:36:42.394987 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-zlc9h" event={"ID":"ea682f38-6eee-4ea1-beea-bcd14edc880e","Type":"ContainerDied","Data":"725d17891c6e69acd5172d8d256ef2ce608f642076839459bd1443935ee559be"}
Jan 29 13:36:42 crc kubenswrapper[4787]: I0129 13:36:42.402605 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197"}
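The pod_startup_latency_tracker entry above for keystone-db-sync-zlc9h makes the relationship between its fields easy to check: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that same span with the image-pull window (firstStartedPulling to lastFinishedPulling) subtracted. That is also why the earlier neutron-70cd entry, whose pull timestamps are the zero value 0001-01-01, reports identical SLO and E2E durations. Reproducing the arithmetic with values copied from the entry, exact up to float rounding:

# Seconds past 13:36, copied from the keystone-db-sync-zlc9h entry above.
created    = 30.000000000   # podCreationTimestamp  2026-01-29 13:36:30
observed   = 39.364766658   # watchObservedRunningTime
pull_start = 31.835304084   # firstStartedPulling
pull_end   = 38.433113589   # lastFinishedPulling

e2e = observed - created               # 9.364766658  -> podStartE2EDuration
slo = e2e - (pull_end - pull_start)    # 2.766957153  -> podStartSLOduration
print(f"e2e={e2e:.9f}s slo={slo:.9f}s")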
Jan 29 13:36:42 crc kubenswrapper[4787]: I0129 13:36:42.402644 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad"}
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.205326 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bfd654465-tln9h"
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.213881 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c"}
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.275002 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-kxw2m"]
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.275277 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" podUID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerName="dnsmasq-dns" containerID="cri-o://c89de210dc907d1b198039bb2de1c4745c6f38d0192e810a397149293e3411dd" gracePeriod=10
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.599238 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-zlc9h"
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.636919 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5spcb\" (UniqueName: \"kubernetes.io/projected/ea682f38-6eee-4ea1-beea-bcd14edc880e-kube-api-access-5spcb\") pod \"ea682f38-6eee-4ea1-beea-bcd14edc880e\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") "
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.636977 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-combined-ca-bundle\") pod \"ea682f38-6eee-4ea1-beea-bcd14edc880e\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") "
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.637020 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-config-data\") pod \"ea682f38-6eee-4ea1-beea-bcd14edc880e\" (UID: \"ea682f38-6eee-4ea1-beea-bcd14edc880e\") "
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.643119 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea682f38-6eee-4ea1-beea-bcd14edc880e-kube-api-access-5spcb" (OuterVolumeSpecName: "kube-api-access-5spcb") pod "ea682f38-6eee-4ea1-beea-bcd14edc880e" (UID: "ea682f38-6eee-4ea1-beea-bcd14edc880e"). InnerVolumeSpecName "kube-api-access-5spcb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.679940 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-config-data" (OuterVolumeSpecName: "config-data") pod "ea682f38-6eee-4ea1-beea-bcd14edc880e" (UID: "ea682f38-6eee-4ea1-beea-bcd14edc880e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.685417 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea682f38-6eee-4ea1-beea-bcd14edc880e" (UID: "ea682f38-6eee-4ea1-beea-bcd14edc880e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.739037 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5spcb\" (UniqueName: \"kubernetes.io/projected/ea682f38-6eee-4ea1-beea-bcd14edc880e-kube-api-access-5spcb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.739089 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:44 crc kubenswrapper[4787]: I0129 13:36:44.739107 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea682f38-6eee-4ea1-beea-bcd14edc880e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.218185 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" podUID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: connect: connection refused" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.229655 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf"} Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.232112 4787 generic.go:334] "Generic (PLEG): container finished" podID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerID="c89de210dc907d1b198039bb2de1c4745c6f38d0192e810a397149293e3411dd" exitCode=0 Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.232159 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" event={"ID":"c7b05d84-4045-491a-98d9-e974a5ad3d86","Type":"ContainerDied","Data":"c89de210dc907d1b198039bb2de1c4745c6f38d0192e810a397149293e3411dd"} Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.233413 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-zlc9h" event={"ID":"ea682f38-6eee-4ea1-beea-bcd14edc880e","Type":"ContainerDied","Data":"b7c4ae0a16ad14940d2fc60f328c511d5a52ed5232dbf1ca005694fec0b3b214"} Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.233445 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7c4ae0a16ad14940d2fc60f328c511d5a52ed5232dbf1ca005694fec0b3b214" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.235356 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-zlc9h" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.324763 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-rrjwp"] Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.325190 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af317c31-0e4f-4ddb-8044-a9ce9965f264" containerName="mariadb-account-create-update" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325210 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="af317c31-0e4f-4ddb-8044-a9ce9965f264" containerName="mariadb-account-create-update" Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.325231 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="012c4748-24f7-48bd-983a-481f3f544724" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325238 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="012c4748-24f7-48bd-983a-481f3f544724" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.325254 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea682f38-6eee-4ea1-beea-bcd14edc880e" containerName="keystone-db-sync" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325262 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea682f38-6eee-4ea1-beea-bcd14edc880e" containerName="keystone-db-sync" Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.325275 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a5645bd-0519-4e87-850a-7cd4c72bd0c1" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325283 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a5645bd-0519-4e87-850a-7cd4c72bd0c1" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.325291 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdeb5cf3-4f9a-4f9d-8559-648f5079397a" containerName="swift-ring-rebalance" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325299 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdeb5cf3-4f9a-4f9d-8559-648f5079397a" containerName="swift-ring-rebalance" Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.325320 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dad383e-4d7c-485c-8a28-cf87a91f3370" containerName="mariadb-account-create-update" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325327 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dad383e-4d7c-485c-8a28-cf87a91f3370" containerName="mariadb-account-create-update" Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.325340 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59009fab-52fa-4e28-a87e-2fa7a49d9f7d" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325347 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="59009fab-52fa-4e28-a87e-2fa7a49d9f7d" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.325359 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af36f69e-989c-40df-b146-df2168789b88" containerName="mariadb-account-create-update" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325366 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="af36f69e-989c-40df-b146-df2168789b88" containerName="mariadb-account-create-update" Jan 29 
13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325598 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dad383e-4d7c-485c-8a28-cf87a91f3370" containerName="mariadb-account-create-update" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325614 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="59009fab-52fa-4e28-a87e-2fa7a49d9f7d" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325628 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea682f38-6eee-4ea1-beea-bcd14edc880e" containerName="keystone-db-sync" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325642 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="af317c31-0e4f-4ddb-8044-a9ce9965f264" containerName="mariadb-account-create-update" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325654 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="af36f69e-989c-40df-b146-df2168789b88" containerName="mariadb-account-create-update" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325666 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="012c4748-24f7-48bd-983a-481f3f544724" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325680 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdeb5cf3-4f9a-4f9d-8559-648f5079397a" containerName="swift-ring-rebalance" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.325688 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a5645bd-0519-4e87-850a-7cd4c72bd0c1" containerName="mariadb-database-create" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.326717 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.338399 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-56d6b"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.352040 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.357318 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.357590 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k2tqv" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.357630 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.358495 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.360779 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.392728 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-rrjwp"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.437757 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-56d6b"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463125 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-credential-keys\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463332 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-config-data\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463394 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-nb\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463441 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r9hv\" (UniqueName: \"kubernetes.io/projected/d111897d-ac6e-4f19-bbbb-b61e1b34deee-kube-api-access-9r9hv\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463483 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-fernet-keys\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463540 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-scripts\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " 
pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463584 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-combined-ca-bundle\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463654 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-dns-svc\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463708 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-sb\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463740 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77x2q\" (UniqueName: \"kubernetes.io/projected/93e1e85e-d253-48eb-b0fe-9eb484551076-kube-api-access-77x2q\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.463778 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-config\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.522897 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-rr4qj"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.524101 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.528190 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-2ltwr" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.528747 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.528784 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.555129 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-rr4qj"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565212 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-scripts\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565290 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-combined-ca-bundle\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565343 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-combined-ca-bundle\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565374 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-dns-svc\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565414 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-sb\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565441 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77x2q\" (UniqueName: \"kubernetes.io/projected/93e1e85e-d253-48eb-b0fe-9eb484551076-kube-api-access-77x2q\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565538 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-config\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565569 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-config-data\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565593 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-db-sync-config-data\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565630 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-credential-keys\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565657 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-scripts\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565682 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftjdb\" (UniqueName: \"kubernetes.io/projected/05d80766-0024-4274-934c-0c6e206e5de0-kube-api-access-ftjdb\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565750 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-config-data\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565786 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05d80766-0024-4274-934c-0c6e206e5de0-etc-machine-id\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565817 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-nb\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565847 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r9hv\" (UniqueName: \"kubernetes.io/projected/d111897d-ac6e-4f19-bbbb-b61e1b34deee-kube-api-access-9r9hv\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.565869 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-fernet-keys\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.582524 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-credential-keys\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.583309 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-sb\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.594709 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-config\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.596436 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-config-data\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.596841 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-scripts\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.598028 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-nb\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.605340 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-dns-svc\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.609149 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-combined-ca-bundle\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.620806 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-fernet-keys\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc 
kubenswrapper[4787]: I0129 13:36:45.625197 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.627236 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.640218 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.641347 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.659323 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.667945 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77x2q\" (UniqueName: \"kubernetes.io/projected/93e1e85e-d253-48eb-b0fe-9eb484551076-kube-api-access-77x2q\") pod \"keystone-bootstrap-56d6b\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") " pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670268 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-combined-ca-bundle\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670341 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-log-httpd\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670381 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-config-data\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670409 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-db-sync-config-data\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670439 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt7tf\" (UniqueName: \"kubernetes.io/projected/e13248de-b9b0-4027-bc49-e5a6ea72cf71-kube-api-access-nt7tf\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670490 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-scripts\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670537 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-ftjdb\" (UniqueName: \"kubernetes.io/projected/05d80766-0024-4274-934c-0c6e206e5de0-kube-api-access-ftjdb\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670566 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670596 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-scripts\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670628 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-run-httpd\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670657 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670685 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05d80766-0024-4274-934c-0c6e206e5de0-etc-machine-id\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.670734 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-config-data\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.671121 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05d80766-0024-4274-934c-0c6e206e5de0-etc-machine-id\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.676829 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-scripts\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.682950 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-config-data\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 
crc kubenswrapper[4787]: I0129 13:36:45.687907 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-db-sync-config-data\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.688393 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-combined-ca-bundle\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.698993 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r9hv\" (UniqueName: \"kubernetes.io/projected/d111897d-ac6e-4f19-bbbb-b61e1b34deee-kube-api-access-9r9hv\") pod \"dnsmasq-dns-99559fbf5-rrjwp\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.712274 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftjdb\" (UniqueName: \"kubernetes.io/projected/05d80766-0024-4274-934c-0c6e206e5de0-kube-api-access-ftjdb\") pod \"cinder-db-sync-rr4qj\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.727289 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-56d6b" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.743124 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-j2hxl"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.745918 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.759332 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.759761 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.759902 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-6gpn9" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771638 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-config-data\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771686 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-combined-ca-bundle\") pod \"neutron-db-sync-j2hxl\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771723 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pjd8\" (UniqueName: \"kubernetes.io/projected/62924dc0-4190-4229-a277-6a3a1f775498-kube-api-access-2pjd8\") pod \"neutron-db-sync-j2hxl\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771746 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-log-httpd\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771783 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt7tf\" (UniqueName: \"kubernetes.io/projected/e13248de-b9b0-4027-bc49-e5a6ea72cf71-kube-api-access-nt7tf\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771806 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771827 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-scripts\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771848 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-run-httpd\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771868 4787 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.771885 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-config\") pod \"neutron-db-sync-j2hxl\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.775930 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-run-httpd\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.776578 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-log-httpd\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.776724 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-config-data\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.794706 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.794970 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-scripts\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.795166 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.797724 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-p77h8"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.798990 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.818741 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt7tf\" (UniqueName: \"kubernetes.io/projected/e13248de-b9b0-4027-bc49-e5a6ea72cf71-kube-api-access-nt7tf\") pod \"ceilometer-0\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " pod="openstack/ceilometer-0" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.821653 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.821910 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-j2hxl"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.825030 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-rjz4t" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.836755 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-p77h8"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.839352 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.849881 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.861513 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-rrjwp"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.862021 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.874406 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-combined-ca-bundle\") pod \"neutron-db-sync-j2hxl\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.874476 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pjd8\" (UniqueName: \"kubernetes.io/projected/62924dc0-4190-4229-a277-6a3a1f775498-kube-api-access-2pjd8\") pod \"neutron-db-sync-j2hxl\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.874974 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-config\") pod \"neutron-db-sync-j2hxl\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.879138 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-combined-ca-bundle\") pod \"neutron-db-sync-j2hxl\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.885144 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-config\") pod \"neutron-db-sync-j2hxl\" 
(UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.893635 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-jp978"] Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.894104 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerName="dnsmasq-dns" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.894121 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerName="dnsmasq-dns" Jan 29 13:36:45 crc kubenswrapper[4787]: E0129 13:36:45.894159 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerName="init" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.894169 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerName="init" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.894358 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7b05d84-4045-491a-98d9-e974a5ad3d86" containerName="dnsmasq-dns" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.895045 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-jp978" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.897319 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pjd8\" (UniqueName: \"kubernetes.io/projected/62924dc0-4190-4229-a277-6a3a1f775498-kube-api-access-2pjd8\") pod \"neutron-db-sync-j2hxl\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.901991 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.902159 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.902568 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-jp978"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.903904 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-nz8rd" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.915372 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-xr7br"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.916870 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.926479 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-xr7br"] Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976314 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vj6gm\" (UniqueName: \"kubernetes.io/projected/c7b05d84-4045-491a-98d9-e974a5ad3d86-kube-api-access-vj6gm\") pod \"c7b05d84-4045-491a-98d9-e974a5ad3d86\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976433 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-nb\") pod \"c7b05d84-4045-491a-98d9-e974a5ad3d86\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976490 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-dns-svc\") pod \"c7b05d84-4045-491a-98d9-e974a5ad3d86\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976526 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-config\") pod \"c7b05d84-4045-491a-98d9-e974a5ad3d86\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976565 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-sb\") pod \"c7b05d84-4045-491a-98d9-e974a5ad3d86\" (UID: \"c7b05d84-4045-491a-98d9-e974a5ad3d86\") " Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976740 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-combined-ca-bundle\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976775 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-nb\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976803 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-sb\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976829 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-config-data\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " 
pod="openstack/placement-db-sync-jp978" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976887 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-combined-ca-bundle\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976910 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-logs\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976931 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-config\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976954 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh4kj\" (UniqueName: \"kubernetes.io/projected/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-kube-api-access-hh4kj\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.976979 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgk45\" (UniqueName: \"kubernetes.io/projected/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-kube-api-access-kgk45\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.977021 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-scripts\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.977053 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-db-sync-config-data\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.977105 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-dns-svc\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.977199 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nncgg\" (UniqueName: \"kubernetes.io/projected/20a74a62-29d2-42f7-8e89-10746401cc47-kube-api-access-nncgg\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: 
\"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:45 crc kubenswrapper[4787]: I0129 13:36:45.985244 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7b05d84-4045-491a-98d9-e974a5ad3d86-kube-api-access-vj6gm" (OuterVolumeSpecName: "kube-api-access-vj6gm") pod "c7b05d84-4045-491a-98d9-e974a5ad3d86" (UID: "c7b05d84-4045-491a-98d9-e974a5ad3d86"). InnerVolumeSpecName "kube-api-access-vj6gm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.029244 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-config" (OuterVolumeSpecName: "config") pod "c7b05d84-4045-491a-98d9-e974a5ad3d86" (UID: "c7b05d84-4045-491a-98d9-e974a5ad3d86"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.035163 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c7b05d84-4045-491a-98d9-e974a5ad3d86" (UID: "c7b05d84-4045-491a-98d9-e974a5ad3d86"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.038627 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c7b05d84-4045-491a-98d9-e974a5ad3d86" (UID: "c7b05d84-4045-491a-98d9-e974a5ad3d86"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.051843 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c7b05d84-4045-491a-98d9-e974a5ad3d86" (UID: "c7b05d84-4045-491a-98d9-e974a5ad3d86"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.079151 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-config-data\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.079442 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-combined-ca-bundle\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.079586 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-logs\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.079707 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-config\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.079816 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh4kj\" (UniqueName: \"kubernetes.io/projected/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-kube-api-access-hh4kj\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.079932 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgk45\" (UniqueName: \"kubernetes.io/projected/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-kube-api-access-kgk45\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.080096 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-scripts\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.080219 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-db-sync-config-data\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.080329 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-dns-svc\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.080427 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-nncgg\" (UniqueName: \"kubernetes.io/projected/20a74a62-29d2-42f7-8e89-10746401cc47-kube-api-access-nncgg\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.080651 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-combined-ca-bundle\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.080758 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-nb\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.080882 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-sb\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.081023 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vj6gm\" (UniqueName: \"kubernetes.io/projected/c7b05d84-4045-491a-98d9-e974a5ad3d86-kube-api-access-vj6gm\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.081118 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.081215 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.081303 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.081395 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7b05d84-4045-491a-98d9-e974a5ad3d86-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.082339 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-sb\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.084345 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-dns-svc\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 
13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.084625 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-logs\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.085370 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-config\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.085991 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-nb\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.098035 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-config-data\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.098424 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.103621 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-db-sync-config-data\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.103914 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-scripts\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.103916 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-combined-ca-bundle\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.107101 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-combined-ca-bundle\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.110576 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh4kj\" (UniqueName: \"kubernetes.io/projected/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-kube-api-access-hh4kj\") pod \"barbican-db-sync-p77h8\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.122277 4787 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgk45\" (UniqueName: \"kubernetes.io/projected/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-kube-api-access-kgk45\") pod \"placement-db-sync-jp978\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.149428 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nncgg\" (UniqueName: \"kubernetes.io/projected/20a74a62-29d2-42f7-8e89-10746401cc47-kube-api-access-nncgg\") pod \"dnsmasq-dns-67f84f7cd9-xr7br\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") " pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.159812 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.162447 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-p77h8" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.230776 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-jp978" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.246624 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.255515 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" event={"ID":"c7b05d84-4045-491a-98d9-e974a5ad3d86","Type":"ContainerDied","Data":"c96b2a838d749f373a3776419bfd46a50e6bd086b5ab237ab7b73861df622fbe"} Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.255563 4787 scope.go:117] "RemoveContainer" containerID="c89de210dc907d1b198039bb2de1c4745c6f38d0192e810a397149293e3411dd" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.255694 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-kxw2m" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.322736 4787 scope.go:117] "RemoveContainer" containerID="234ee080c589fad4dd9d9f3bb31393a993de18379e3d15dbfeb20ef186e88f88" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.325745 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-kxw2m"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.338942 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-kxw2m"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.482706 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.492000 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.495193 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.495441 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.495316 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-tk4jm" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.501316 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.521779 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.557206 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.559089 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.561267 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.561618 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.623386 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-logs\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.623670 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqsnk\" (UniqueName: \"kubernetes.io/projected/bda2514b-1841-471f-823e-013bdcf4786d-kube-api-access-lqsnk\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.623701 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.623728 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-scripts\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.624003 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-public-tls-certs\") pod 
\"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.624179 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-config-data\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.624239 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.624280 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.638392 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726290 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726392 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-logs\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726449 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqsnk\" (UniqueName: \"kubernetes.io/projected/bda2514b-1841-471f-823e-013bdcf4786d-kube-api-access-lqsnk\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726505 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726547 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-scripts\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726582 4787 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-scripts\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726611 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fzl6\" (UniqueName: \"kubernetes.io/projected/19a952e6-0e43-4b02-99b4-2de075f6a941-kube-api-access-6fzl6\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726669 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726697 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726728 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-config-data\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726765 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726791 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726821 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-logs\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726861 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-config-data\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 
13:36:46.726883 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.726910 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.727285 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.730278 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-logs\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.731797 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.738744 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.741390 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-scripts\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.742435 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.742618 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-56d6b"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.745481 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-config-data\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc 
kubenswrapper[4787]: I0129 13:36:46.751627 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqsnk\" (UniqueName: \"kubernetes.io/projected/bda2514b-1841-471f-823e-013bdcf4786d-kube-api-access-lqsnk\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.767272 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") " pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828249 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828363 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-scripts\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828394 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fzl6\" (UniqueName: \"kubernetes.io/projected/19a952e6-0e43-4b02-99b4-2de075f6a941-kube-api-access-6fzl6\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828441 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828486 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828517 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-config-data\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828553 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828579 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-logs\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.828845 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.830508 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-logs\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.830641 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.832180 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.833467 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.845903 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-config-data\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.849392 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-scripts\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.854853 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.865024 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fzl6\" (UniqueName: \"kubernetes.io/projected/19a952e6-0e43-4b02-99b4-2de075f6a941-kube-api-access-6fzl6\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc 
kubenswrapper[4787]: I0129 13:36:46.869009 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.890229 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") " pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.972613 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.989999 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-rr4qj"] Jan 29 13:36:46 crc kubenswrapper[4787]: I0129 13:36:46.998650 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-rrjwp"] Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.007282 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-j2hxl"] Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.167785 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-p77h8"] Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.178685 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-jp978"] Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.188908 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-xr7br"] Jan 29 13:36:47 crc kubenswrapper[4787]: W0129 13:36:47.248810 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2d6dbc2_04fe_4797_818d_fb90c0ab7287.slice/crio-97d913a18d7c1f54d0adec649c21f5bd7b4d7ba59cc8fbbe1772327703246ba2 WatchSource:0}: Error finding container 97d913a18d7c1f54d0adec649c21f5bd7b4d7ba59cc8fbbe1772327703246ba2: Status 404 returned error can't find the container with id 97d913a18d7c1f54d0adec649c21f5bd7b4d7ba59cc8fbbe1772327703246ba2 Jan 29 13:36:47 crc kubenswrapper[4787]: W0129 13:36:47.253248 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63ca8415_8ac5_4c3d_9fca_98a46e8a6da7.slice/crio-7f4af4e42eeb109c266490326e09eed4c4ea7277d5c4aca995590309f21757c4 WatchSource:0}: Error finding container 7f4af4e42eeb109c266490326e09eed4c4ea7277d5c4aca995590309f21757c4: Status 404 returned error can't find the container with id 7f4af4e42eeb109c266490326e09eed4c4ea7277d5c4aca995590309f21757c4 Jan 29 13:36:47 crc kubenswrapper[4787]: W0129 13:36:47.254336 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20a74a62_29d2_42f7_8e89_10746401cc47.slice/crio-32e75d71b22149136b69bfb2b23a53e8cc40a98663c956b52dee9714480ded88 WatchSource:0}: Error finding container 32e75d71b22149136b69bfb2b23a53e8cc40a98663c956b52dee9714480ded88: Status 404 returned error can't find the container with id 32e75d71b22149136b69bfb2b23a53e8cc40a98663c956b52dee9714480ded88 Jan 29 13:36:47 crc kubenswrapper[4787]: W0129 13:36:47.260582 4787 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode13248de_b9b0_4027_bc49_e5a6ea72cf71.slice/crio-17319a62c07409d42a8a80ebbdd4927a463b32fbc31640b2522725374411ae71 WatchSource:0}: Error finding container 17319a62c07409d42a8a80ebbdd4927a463b32fbc31640b2522725374411ae71: Status 404 returned error can't find the container with id 17319a62c07409d42a8a80ebbdd4927a463b32fbc31640b2522725374411ae71 Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.272208 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p77h8" event={"ID":"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7","Type":"ContainerStarted","Data":"7f4af4e42eeb109c266490326e09eed4c4ea7277d5c4aca995590309f21757c4"} Jan 29 13:36:47 crc kubenswrapper[4787]: W0129 13:36:47.274619 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd111897d_ac6e_4f19_bbbb_b61e1b34deee.slice/crio-b03d5f7c98150fb948e0ba0595f2b93e2eea5072d6a64752499b00f794f783cc WatchSource:0}: Error finding container b03d5f7c98150fb948e0ba0595f2b93e2eea5072d6a64752499b00f794f783cc: Status 404 returned error can't find the container with id b03d5f7c98150fb948e0ba0595f2b93e2eea5072d6a64752499b00f794f783cc Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.277055 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-56d6b" event={"ID":"93e1e85e-d253-48eb-b0fe-9eb484551076","Type":"ContainerStarted","Data":"bfbfac753b6dfa81134b61c8ef729d8e8b2829e9eabd4059c428538b8fbcd0c2"} Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.279117 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" event={"ID":"20a74a62-29d2-42f7-8e89-10746401cc47","Type":"ContainerStarted","Data":"32e75d71b22149136b69bfb2b23a53e8cc40a98663c956b52dee9714480ded88"} Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.282024 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jp978" event={"ID":"e2d6dbc2-04fe-4797-818d-fb90c0ab7287","Type":"ContainerStarted","Data":"97d913a18d7c1f54d0adec649c21f5bd7b4d7ba59cc8fbbe1772327703246ba2"} Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.775072 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.856535 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:36:47 crc kubenswrapper[4787]: I0129 13:36:47.867304 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.018011 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7b05d84-4045-491a-98d9-e974a5ad3d86" path="/var/lib/kubelet/pods/c7b05d84-4045-491a-98d9-e974a5ad3d86/volumes" Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.043499 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.103326 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.304026 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-56d6b" 
event={"ID":"93e1e85e-d253-48eb-b0fe-9eb484551076","Type":"ContainerStarted","Data":"736791f07d105c1f37aa90173faebd8d5e29477a8d86b9bfda3cfa33aab306fa"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.314728 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e13248de-b9b0-4027-bc49-e5a6ea72cf71","Type":"ContainerStarted","Data":"17319a62c07409d42a8a80ebbdd4927a463b32fbc31640b2522725374411ae71"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.317921 4787 generic.go:334] "Generic (PLEG): container finished" podID="20a74a62-29d2-42f7-8e89-10746401cc47" containerID="5649f376f8260efeb0246306923ee1c4b9714541f81c7b517345bb8344516e81" exitCode=0 Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.326722 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" event={"ID":"20a74a62-29d2-42f7-8e89-10746401cc47","Type":"ContainerDied","Data":"5649f376f8260efeb0246306923ee1c4b9714541f81c7b517345bb8344516e81"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.329047 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-56d6b" podStartSLOduration=3.329026544 podStartE2EDuration="3.329026544s" podCreationTimestamp="2026-01-29 13:36:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:48.328068546 +0000 UTC m=+1247.089328822" watchObservedRunningTime="2026-01-29 13:36:48.329026544 +0000 UTC m=+1247.090286820" Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.361172 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.361224 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.374653 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-rr4qj" event={"ID":"05d80766-0024-4274-934c-0c6e206e5de0","Type":"ContainerStarted","Data":"059e835e2c1db4218f2abd662c77102803437c86e42932771c4c48be1ea43c63"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.383362 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bda2514b-1841-471f-823e-013bdcf4786d","Type":"ContainerStarted","Data":"9c6ddd3d23456e89d4636b0d8c593b5bed4f1eb14b4a536a04a780662c160e45"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.394324 4787 generic.go:334] "Generic (PLEG): container finished" podID="d111897d-ac6e-4f19-bbbb-b61e1b34deee" containerID="82b59301b0d5d8dbdac71fdf979cb63a8337353d3706fffb71b0c1eb1249e371" exitCode=0 Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.395391 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" event={"ID":"d111897d-ac6e-4f19-bbbb-b61e1b34deee","Type":"ContainerDied","Data":"82b59301b0d5d8dbdac71fdf979cb63a8337353d3706fffb71b0c1eb1249e371"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.395438 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" 
event={"ID":"d111897d-ac6e-4f19-bbbb-b61e1b34deee","Type":"ContainerStarted","Data":"b03d5f7c98150fb948e0ba0595f2b93e2eea5072d6a64752499b00f794f783cc"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.402003 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"19a952e6-0e43-4b02-99b4-2de075f6a941","Type":"ContainerStarted","Data":"29ab489cd9accbcd9660e5674a1f52a50ce992427327ec1e6f35218ab3b95ad0"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.405977 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-j2hxl" event={"ID":"62924dc0-4190-4229-a277-6a3a1f775498","Type":"ContainerStarted","Data":"e1b6628372053d9ce300b29e161a3428b69bea52b970a5b5a24b898c530045d1"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.406005 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-j2hxl" event={"ID":"62924dc0-4190-4229-a277-6a3a1f775498","Type":"ContainerStarted","Data":"b985cec9b436948dc26dd5d36598aa30aa2d3582b1e7458961eeb10772d01901"} Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.450160 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-j2hxl" podStartSLOduration=3.450145191 podStartE2EDuration="3.450145191s" podCreationTimestamp="2026-01-29 13:36:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:48.440775563 +0000 UTC m=+1247.202035839" watchObservedRunningTime="2026-01-29 13:36:48.450145191 +0000 UTC m=+1247.211405467" Jan 29 13:36:48 crc kubenswrapper[4787]: I0129 13:36:48.955619 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.091282 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-nb\") pod \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.091357 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-dns-svc\") pod \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.091636 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-sb\") pod \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.091688 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-config\") pod \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\" (UID: \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.091761 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r9hv\" (UniqueName: \"kubernetes.io/projected/d111897d-ac6e-4f19-bbbb-b61e1b34deee-kube-api-access-9r9hv\") pod \"d111897d-ac6e-4f19-bbbb-b61e1b34deee\" (UID: 
\"d111897d-ac6e-4f19-bbbb-b61e1b34deee\") " Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.106501 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d111897d-ac6e-4f19-bbbb-b61e1b34deee-kube-api-access-9r9hv" (OuterVolumeSpecName: "kube-api-access-9r9hv") pod "d111897d-ac6e-4f19-bbbb-b61e1b34deee" (UID: "d111897d-ac6e-4f19-bbbb-b61e1b34deee"). InnerVolumeSpecName "kube-api-access-9r9hv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.127372 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d111897d-ac6e-4f19-bbbb-b61e1b34deee" (UID: "d111897d-ac6e-4f19-bbbb-b61e1b34deee"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.132921 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-config" (OuterVolumeSpecName: "config") pod "d111897d-ac6e-4f19-bbbb-b61e1b34deee" (UID: "d111897d-ac6e-4f19-bbbb-b61e1b34deee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.144385 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d111897d-ac6e-4f19-bbbb-b61e1b34deee" (UID: "d111897d-ac6e-4f19-bbbb-b61e1b34deee"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.151922 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d111897d-ac6e-4f19-bbbb-b61e1b34deee" (UID: "d111897d-ac6e-4f19-bbbb-b61e1b34deee"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.194775 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.194837 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.194847 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.194858 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d111897d-ac6e-4f19-bbbb-b61e1b34deee-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.194868 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r9hv\" (UniqueName: \"kubernetes.io/projected/d111897d-ac6e-4f19-bbbb-b61e1b34deee-kube-api-access-9r9hv\") on node \"crc\" DevicePath \"\"" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.422395 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bda2514b-1841-471f-823e-013bdcf4786d","Type":"ContainerStarted","Data":"1fcf0301a99317b5400daebd2a5909ce51eda77bf4e635536fd1722d8303e744"} Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.433707 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" event={"ID":"20a74a62-29d2-42f7-8e89-10746401cc47","Type":"ContainerStarted","Data":"05c2118580ea0dff8394d507da41b8d1ebdbbe48f5f715c29961a59400b02996"} Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.434086 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.439133 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" event={"ID":"d111897d-ac6e-4f19-bbbb-b61e1b34deee","Type":"ContainerDied","Data":"b03d5f7c98150fb948e0ba0595f2b93e2eea5072d6a64752499b00f794f783cc"} Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.439164 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-99559fbf5-rrjwp" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.439189 4787 scope.go:117] "RemoveContainer" containerID="82b59301b0d5d8dbdac71fdf979cb63a8337353d3706fffb71b0c1eb1249e371" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.469430 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b"} Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.469492 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274"} Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.469505 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b"} Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.473623 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"19a952e6-0e43-4b02-99b4-2de075f6a941","Type":"ContainerStarted","Data":"1864f171dd7f0c07209a40d13174a5487a83b584c2d74da37fff67ecf0ae6287"} Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.508615 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" podStartSLOduration=4.50859803 podStartE2EDuration="4.50859803s" podCreationTimestamp="2026-01-29 13:36:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:49.45515604 +0000 UTC m=+1248.216416316" watchObservedRunningTime="2026-01-29 13:36:49.50859803 +0000 UTC m=+1248.269858306" Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.539477 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-rrjwp"] Jan 29 13:36:49 crc kubenswrapper[4787]: I0129 13:36:49.541427 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-99559fbf5-rrjwp"] Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.002101 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d111897d-ac6e-4f19-bbbb-b61e1b34deee" path="/var/lib/kubelet/pods/d111897d-ac6e-4f19-bbbb-b61e1b34deee/volumes" Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.487847 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"19a952e6-0e43-4b02-99b4-2de075f6a941","Type":"ContainerStarted","Data":"964f3f92f050584e5142ba495cf2441db316593593fcf26751c09bf5236a8d52"} Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.488277 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerName="glance-log" containerID="cri-o://1864f171dd7f0c07209a40d13174a5487a83b584c2d74da37fff67ecf0ae6287" gracePeriod=30 Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.488629 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerName="glance-httpd" 
containerID="cri-o://964f3f92f050584e5142ba495cf2441db316593593fcf26751c09bf5236a8d52" gracePeriod=30 Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.495815 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bda2514b-1841-471f-823e-013bdcf4786d" containerName="glance-log" containerID="cri-o://1fcf0301a99317b5400daebd2a5909ce51eda77bf4e635536fd1722d8303e744" gracePeriod=30 Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.495896 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bda2514b-1841-471f-823e-013bdcf4786d","Type":"ContainerStarted","Data":"9dc05ebbcb49bd8a44a284b0f00e382d32ebda038ffb5d7d558bce9be8ee1182"} Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.495952 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bda2514b-1841-471f-823e-013bdcf4786d" containerName="glance-httpd" containerID="cri-o://9dc05ebbcb49bd8a44a284b0f00e382d32ebda038ffb5d7d558bce9be8ee1182" gracePeriod=30 Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.514545 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.514523884 podStartE2EDuration="5.514523884s" podCreationTimestamp="2026-01-29 13:36:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:50.505921498 +0000 UTC m=+1249.267181774" watchObservedRunningTime="2026-01-29 13:36:50.514523884 +0000 UTC m=+1249.275784170" Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.516376 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67"} Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.516423 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerStarted","Data":"82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289"} Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.539509 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.539489949 podStartE2EDuration="5.539489949s" podCreationTimestamp="2026-01-29 13:36:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:36:50.528547406 +0000 UTC m=+1249.289807682" watchObservedRunningTime="2026-01-29 13:36:50.539489949 +0000 UTC m=+1249.300750225" Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 13:36:50.580729 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=38.112932505 podStartE2EDuration="46.580707289s" podCreationTimestamp="2026-01-29 13:36:04 +0000 UTC" firstStartedPulling="2026-01-29 13:36:38.905667626 +0000 UTC m=+1237.666927902" lastFinishedPulling="2026-01-29 13:36:47.373442409 +0000 UTC m=+1246.134702686" observedRunningTime="2026-01-29 13:36:50.566577104 +0000 UTC m=+1249.327837380" watchObservedRunningTime="2026-01-29 13:36:50.580707289 +0000 UTC m=+1249.341967845" Jan 29 13:36:50 crc kubenswrapper[4787]: I0129 
13:36:50.941688 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-xr7br"]
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.019645 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66567888d7-vtkd9"]
Jan 29 13:36:51 crc kubenswrapper[4787]: E0129 13:36:51.020000 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d111897d-ac6e-4f19-bbbb-b61e1b34deee" containerName="init"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.020012 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d111897d-ac6e-4f19-bbbb-b61e1b34deee" containerName="init"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.020188 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d111897d-ac6e-4f19-bbbb-b61e1b34deee" containerName="init"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.021014 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.036733 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.061327 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-vtkd9"]
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.139496 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.139564 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-swift-storage-0\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.139592 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-config\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.139701 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.139796 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pftq\" (UniqueName: \"kubernetes.io/projected/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-kube-api-access-8pftq\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.139826 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-svc\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.243380 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-svc\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.243477 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.243529 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-swift-storage-0\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.243554 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-config\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.243634 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.243696 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pftq\" (UniqueName: \"kubernetes.io/projected/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-kube-api-access-8pftq\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.244314 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-svc\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.245568 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-nb\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.245653 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-swift-storage-0\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.245731 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-config\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.246338 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-sb\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.262939 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pftq\" (UniqueName: \"kubernetes.io/projected/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-kube-api-access-8pftq\") pod \"dnsmasq-dns-66567888d7-vtkd9\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.361956 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-vtkd9"
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.528845 4787 generic.go:334] "Generic (PLEG): container finished" podID="bda2514b-1841-471f-823e-013bdcf4786d" containerID="9dc05ebbcb49bd8a44a284b0f00e382d32ebda038ffb5d7d558bce9be8ee1182" exitCode=0
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.528904 4787 generic.go:334] "Generic (PLEG): container finished" podID="bda2514b-1841-471f-823e-013bdcf4786d" containerID="1fcf0301a99317b5400daebd2a5909ce51eda77bf4e635536fd1722d8303e744" exitCode=143
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.528980 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bda2514b-1841-471f-823e-013bdcf4786d","Type":"ContainerDied","Data":"9dc05ebbcb49bd8a44a284b0f00e382d32ebda038ffb5d7d558bce9be8ee1182"}
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.529036 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bda2514b-1841-471f-823e-013bdcf4786d","Type":"ContainerDied","Data":"1fcf0301a99317b5400daebd2a5909ce51eda77bf4e635536fd1722d8303e744"}
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.532946 4787 generic.go:334] "Generic (PLEG): container finished" podID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerID="964f3f92f050584e5142ba495cf2441db316593593fcf26751c09bf5236a8d52" exitCode=143
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.533198 4787 generic.go:334] "Generic (PLEG): container finished" podID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerID="1864f171dd7f0c07209a40d13174a5487a83b584c2d74da37fff67ecf0ae6287" exitCode=143
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.533540 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="dnsmasq-dns" containerID="cri-o://05c2118580ea0dff8394d507da41b8d1ebdbbe48f5f715c29961a59400b02996" gracePeriod=10
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.534414 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"19a952e6-0e43-4b02-99b4-2de075f6a941","Type":"ContainerDied","Data":"964f3f92f050584e5142ba495cf2441db316593593fcf26751c09bf5236a8d52"}
Jan 29 13:36:51 crc kubenswrapper[4787]: I0129 13:36:51.534484 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"19a952e6-0e43-4b02-99b4-2de075f6a941","Type":"ContainerDied","Data":"1864f171dd7f0c07209a40d13174a5487a83b584c2d74da37fff67ecf0ae6287"}
Jan 29 13:36:52 crc kubenswrapper[4787]: I0129 13:36:52.545387 4787 generic.go:334] "Generic (PLEG): container finished" podID="93e1e85e-d253-48eb-b0fe-9eb484551076" containerID="736791f07d105c1f37aa90173faebd8d5e29477a8d86b9bfda3cfa33aab306fa" exitCode=0
Jan 29 13:36:52 crc kubenswrapper[4787]: I0129 13:36:52.545489 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-56d6b" event={"ID":"93e1e85e-d253-48eb-b0fe-9eb484551076","Type":"ContainerDied","Data":"736791f07d105c1f37aa90173faebd8d5e29477a8d86b9bfda3cfa33aab306fa"}
Jan 29 13:36:52 crc kubenswrapper[4787]: I0129 13:36:52.549516 4787 generic.go:334] "Generic (PLEG): container finished" podID="20a74a62-29d2-42f7-8e89-10746401cc47" containerID="05c2118580ea0dff8394d507da41b8d1ebdbbe48f5f715c29961a59400b02996" exitCode=0
Jan 29 13:36:52 crc kubenswrapper[4787]: I0129 13:36:52.549563 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" event={"ID":"20a74a62-29d2-42f7-8e89-10746401cc47","Type":"ContainerDied","Data":"05c2118580ea0dff8394d507da41b8d1ebdbbe48f5f715c29961a59400b02996"}
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.145358 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.149528 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.307689 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"bda2514b-1841-471f-823e-013bdcf4786d\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.307747 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-public-tls-certs\") pod \"bda2514b-1841-471f-823e-013bdcf4786d\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.307783 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-config-data\") pod \"bda2514b-1841-471f-823e-013bdcf4786d\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.307838 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqsnk\" (UniqueName: \"kubernetes.io/projected/bda2514b-1841-471f-823e-013bdcf4786d-kube-api-access-lqsnk\") pod \"bda2514b-1841-471f-823e-013bdcf4786d\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.307924 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fzl6\" (UniqueName: \"kubernetes.io/projected/19a952e6-0e43-4b02-99b4-2de075f6a941-kube-api-access-6fzl6\") pod \"19a952e6-0e43-4b02-99b4-2de075f6a941\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.307964 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-logs\") pod \"bda2514b-1841-471f-823e-013bdcf4786d\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.307982 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-scripts\") pod \"bda2514b-1841-471f-823e-013bdcf4786d\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308016 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-combined-ca-bundle\") pod \"bda2514b-1841-471f-823e-013bdcf4786d\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308050 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"19a952e6-0e43-4b02-99b4-2de075f6a941\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308102 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-httpd-run\") pod \"19a952e6-0e43-4b02-99b4-2de075f6a941\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308139 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-config-data\") pod \"19a952e6-0e43-4b02-99b4-2de075f6a941\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308163 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-httpd-run\") pod \"bda2514b-1841-471f-823e-013bdcf4786d\" (UID: \"bda2514b-1841-471f-823e-013bdcf4786d\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308183 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-internal-tls-certs\") pod \"19a952e6-0e43-4b02-99b4-2de075f6a941\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308288 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-scripts\") pod \"19a952e6-0e43-4b02-99b4-2de075f6a941\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308313 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-logs\") pod \"19a952e6-0e43-4b02-99b4-2de075f6a941\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.308337 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-combined-ca-bundle\") pod \"19a952e6-0e43-4b02-99b4-2de075f6a941\" (UID: \"19a952e6-0e43-4b02-99b4-2de075f6a941\") "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.310128 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bda2514b-1841-471f-823e-013bdcf4786d" (UID: "bda2514b-1841-471f-823e-013bdcf4786d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.310994 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-logs" (OuterVolumeSpecName: "logs") pod "19a952e6-0e43-4b02-99b4-2de075f6a941" (UID: "19a952e6-0e43-4b02-99b4-2de075f6a941"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.311254 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-logs" (OuterVolumeSpecName: "logs") pod "bda2514b-1841-471f-823e-013bdcf4786d" (UID: "bda2514b-1841-471f-823e-013bdcf4786d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.313188 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "19a952e6-0e43-4b02-99b4-2de075f6a941" (UID: "19a952e6-0e43-4b02-99b4-2de075f6a941"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.319173 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-scripts" (OuterVolumeSpecName: "scripts") pod "bda2514b-1841-471f-823e-013bdcf4786d" (UID: "bda2514b-1841-471f-823e-013bdcf4786d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.321558 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "19a952e6-0e43-4b02-99b4-2de075f6a941" (UID: "19a952e6-0e43-4b02-99b4-2de075f6a941"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.336987 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "bda2514b-1841-471f-823e-013bdcf4786d" (UID: "bda2514b-1841-471f-823e-013bdcf4786d"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.337165 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bda2514b-1841-471f-823e-013bdcf4786d-kube-api-access-lqsnk" (OuterVolumeSpecName: "kube-api-access-lqsnk") pod "bda2514b-1841-471f-823e-013bdcf4786d" (UID: "bda2514b-1841-471f-823e-013bdcf4786d"). InnerVolumeSpecName "kube-api-access-lqsnk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.337218 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19a952e6-0e43-4b02-99b4-2de075f6a941-kube-api-access-6fzl6" (OuterVolumeSpecName: "kube-api-access-6fzl6") pod "19a952e6-0e43-4b02-99b4-2de075f6a941" (UID: "19a952e6-0e43-4b02-99b4-2de075f6a941"). InnerVolumeSpecName "kube-api-access-6fzl6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.350677 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-scripts" (OuterVolumeSpecName: "scripts") pod "19a952e6-0e43-4b02-99b4-2de075f6a941" (UID: "19a952e6-0e43-4b02-99b4-2de075f6a941"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.367993 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bda2514b-1841-471f-823e-013bdcf4786d" (UID: "bda2514b-1841-471f-823e-013bdcf4786d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.391610 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19a952e6-0e43-4b02-99b4-2de075f6a941" (UID: "19a952e6-0e43-4b02-99b4-2de075f6a941"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.394922 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bda2514b-1841-471f-823e-013bdcf4786d" (UID: "bda2514b-1841-471f-823e-013bdcf4786d"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.410983 4787 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411162 4787 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411242 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411305 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19a952e6-0e43-4b02-99b4-2de075f6a941-logs\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411381 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411481 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411570 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411647 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqsnk\" (UniqueName: \"kubernetes.io/projected/bda2514b-1841-471f-823e-013bdcf4786d-kube-api-access-lqsnk\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411717 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fzl6\" (UniqueName: \"kubernetes.io/projected/19a952e6-0e43-4b02-99b4-2de075f6a941-kube-api-access-6fzl6\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411777 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bda2514b-1841-471f-823e-013bdcf4786d-logs\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411836 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.411898 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.412056 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.412751 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-config-data" (OuterVolumeSpecName: "config-data") pod "19a952e6-0e43-4b02-99b4-2de075f6a941" (UID: "19a952e6-0e43-4b02-99b4-2de075f6a941"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.428444 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "19a952e6-0e43-4b02-99b4-2de075f6a941" (UID: "19a952e6-0e43-4b02-99b4-2de075f6a941"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.431581 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.431922 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.434154 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-config-data" (OuterVolumeSpecName: "config-data") pod "bda2514b-1841-471f-823e-013bdcf4786d" (UID: "bda2514b-1841-471f-823e-013bdcf4786d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.515488 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.515517 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.515527 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19a952e6-0e43-4b02-99b4-2de075f6a941-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.515537 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.515545 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bda2514b-1841-471f-823e-013bdcf4786d-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.558850 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"19a952e6-0e43-4b02-99b4-2de075f6a941","Type":"ContainerDied","Data":"29ab489cd9accbcd9660e5674a1f52a50ce992427327ec1e6f35218ab3b95ad0"}
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.559213 4787 scope.go:117] "RemoveContainer" containerID="964f3f92f050584e5142ba495cf2441db316593593fcf26751c09bf5236a8d52"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.558878 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.564823 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bda2514b-1841-471f-823e-013bdcf4786d","Type":"ContainerDied","Data":"9c6ddd3d23456e89d4636b0d8c593b5bed4f1eb14b4a536a04a780662c160e45"}
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.565011 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.619553 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.642944 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.654390 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 13:36:53 crc kubenswrapper[4787]: E0129 13:36:53.654861 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerName="glance-log"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.654874 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerName="glance-log"
Jan 29 13:36:53 crc kubenswrapper[4787]: E0129 13:36:53.654894 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerName="glance-httpd"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.654900 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerName="glance-httpd"
Jan 29 13:36:53 crc kubenswrapper[4787]: E0129 13:36:53.654915 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bda2514b-1841-471f-823e-013bdcf4786d" containerName="glance-httpd"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.654921 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="bda2514b-1841-471f-823e-013bdcf4786d" containerName="glance-httpd"
Jan 29 13:36:53 crc kubenswrapper[4787]: E0129 13:36:53.654939 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bda2514b-1841-471f-823e-013bdcf4786d" containerName="glance-log"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.654945 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="bda2514b-1841-471f-823e-013bdcf4786d" containerName="glance-log"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.655093 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="bda2514b-1841-471f-823e-013bdcf4786d" containerName="glance-httpd"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.655110 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="bda2514b-1841-471f-823e-013bdcf4786d" containerName="glance-log"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.655123 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerName="glance-httpd"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.655136 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" containerName="glance-log"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.655991 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.661906 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.662578 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.662686 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.662721 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-tk4jm"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.700528 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.706601 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.723797 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.733722 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.735953 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.739042 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.739221 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.747387 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821213 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821265 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821303 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-logs\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821330 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821392 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821412 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821444 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821484 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821510 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821535 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821578 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-logs\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821601 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821624 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6clr9\" (UniqueName: \"kubernetes.io/projected/2231985a-9fb4-4ada-8d50-f35907760eab-kube-api-access-6clr9\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.821789 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkjwd\" (UniqueName: \"kubernetes.io/projected/a1d921a8-744d-46fd-b3be-7e79be2532b5-kube-api-access-bkjwd\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.822923 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.822953 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924334 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924374 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924412 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924438 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924476 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924527 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924567 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6clr9\" (UniqueName: \"kubernetes.io/projected/2231985a-9fb4-4ada-8d50-f35907760eab-kube-api-access-6clr9\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924589 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-logs\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924610 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924669 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkjwd\" (UniqueName: \"kubernetes.io/projected/a1d921a8-744d-46fd-b3be-7e79be2532b5-kube-api-access-bkjwd\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924691 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924716 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924761 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924789 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924820 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-logs\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.924839 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.925445 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-logs\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.925648 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.925942 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.926107 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-logs\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.926622 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.927520 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.928922 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.929082 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.930442 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.931404 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.933709 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.942182 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.943191 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.943596 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.947427 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6clr9\" (UniqueName: \"kubernetes.io/projected/2231985a-9fb4-4ada-8d50-f35907760eab-kube-api-access-6clr9\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.947609 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkjwd\" (UniqueName: \"kubernetes.io/projected/a1d921a8-744d-46fd-b3be-7e79be2532b5-kube-api-access-bkjwd\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.954819 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.958841 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.995175 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.997172 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19a952e6-0e43-4b02-99b4-2de075f6a941" path="/var/lib/kubelet/pods/19a952e6-0e43-4b02-99b4-2de075f6a941/volumes"
Jan 29 13:36:53 crc kubenswrapper[4787]: I0129 13:36:53.998086 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bda2514b-1841-471f-823e-013bdcf4786d" path="/var/lib/kubelet/pods/bda2514b-1841-471f-823e-013bdcf4786d/volumes"
Jan 29 13:36:54 crc kubenswrapper[4787]: I0129 13:36:54.055127 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 13:36:58 crc kubenswrapper[4787]: I0129 13:36:58.394583 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:36:58 crc kubenswrapper[4787]: I0129 13:36:58.395081 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:37:01 crc kubenswrapper[4787]: I0129 13:37:01.247750 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout"
Jan 29 13:37:06 crc kubenswrapper[4787]: I0129 13:37:06.249244 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout"
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.631596 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-56d6b"
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.639511 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br"
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689191 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77x2q\" (UniqueName: \"kubernetes.io/projected/93e1e85e-d253-48eb-b0fe-9eb484551076-kube-api-access-77x2q\") pod \"93e1e85e-d253-48eb-b0fe-9eb484551076\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689262 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-combined-ca-bundle\") pod \"93e1e85e-d253-48eb-b0fe-9eb484551076\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689284 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-fernet-keys\") pod \"93e1e85e-d253-48eb-b0fe-9eb484551076\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689304 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nncgg\" (UniqueName: \"kubernetes.io/projected/20a74a62-29d2-42f7-8e89-10746401cc47-kube-api-access-nncgg\") pod \"20a74a62-29d2-42f7-8e89-10746401cc47\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689373 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-sb\") pod \"20a74a62-29d2-42f7-8e89-10746401cc47\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689406 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-config-data\") pod \"93e1e85e-d253-48eb-b0fe-9eb484551076\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689423 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-credential-keys\") pod \"93e1e85e-d253-48eb-b0fe-9eb484551076\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689518 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-nb\") pod \"20a74a62-29d2-42f7-8e89-10746401cc47\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689582 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-config\") pod \"20a74a62-29d2-42f7-8e89-10746401cc47\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689599 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-dns-svc\") pod \"20a74a62-29d2-42f7-8e89-10746401cc47\" (UID: \"20a74a62-29d2-42f7-8e89-10746401cc47\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.689619 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-scripts\") pod \"93e1e85e-d253-48eb-b0fe-9eb484551076\" (UID: \"93e1e85e-d253-48eb-b0fe-9eb484551076\") "
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.709038 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "93e1e85e-d253-48eb-b0fe-9eb484551076" (UID: "93e1e85e-d253-48eb-b0fe-9eb484551076"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.711378 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "93e1e85e-d253-48eb-b0fe-9eb484551076" (UID: "93e1e85e-d253-48eb-b0fe-9eb484551076"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.711443 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93e1e85e-d253-48eb-b0fe-9eb484551076-kube-api-access-77x2q" (OuterVolumeSpecName: "kube-api-access-77x2q") pod "93e1e85e-d253-48eb-b0fe-9eb484551076" (UID: "93e1e85e-d253-48eb-b0fe-9eb484551076"). InnerVolumeSpecName "kube-api-access-77x2q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.712697 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20a74a62-29d2-42f7-8e89-10746401cc47-kube-api-access-nncgg" (OuterVolumeSpecName: "kube-api-access-nncgg") pod "20a74a62-29d2-42f7-8e89-10746401cc47" (UID: "20a74a62-29d2-42f7-8e89-10746401cc47"). InnerVolumeSpecName "kube-api-access-nncgg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.726118 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-scripts" (OuterVolumeSpecName: "scripts") pod "93e1e85e-d253-48eb-b0fe-9eb484551076" (UID: "93e1e85e-d253-48eb-b0fe-9eb484551076"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.746141 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-config-data" (OuterVolumeSpecName: "config-data") pod "93e1e85e-d253-48eb-b0fe-9eb484551076" (UID: "93e1e85e-d253-48eb-b0fe-9eb484551076"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.746739 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "20a74a62-29d2-42f7-8e89-10746401cc47" (UID: "20a74a62-29d2-42f7-8e89-10746401cc47"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.748805 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-56d6b" event={"ID":"93e1e85e-d253-48eb-b0fe-9eb484551076","Type":"ContainerDied","Data":"bfbfac753b6dfa81134b61c8ef729d8e8b2829e9eabd4059c428538b8fbcd0c2"}
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.748846 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfbfac753b6dfa81134b61c8ef729d8e8b2829e9eabd4059c428538b8fbcd0c2"
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.748909 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-56d6b"
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.750912 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93e1e85e-d253-48eb-b0fe-9eb484551076" (UID: "93e1e85e-d253-48eb-b0fe-9eb484551076"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.757380 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" event={"ID":"20a74a62-29d2-42f7-8e89-10746401cc47","Type":"ContainerDied","Data":"32e75d71b22149136b69bfb2b23a53e8cc40a98663c956b52dee9714480ded88"}
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.757543 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br"
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.759633 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "20a74a62-29d2-42f7-8e89-10746401cc47" (UID: "20a74a62-29d2-42f7-8e89-10746401cc47"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.759975 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-config" (OuterVolumeSpecName: "config") pod "20a74a62-29d2-42f7-8e89-10746401cc47" (UID: "20a74a62-29d2-42f7-8e89-10746401cc47"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.773733 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "20a74a62-29d2-42f7-8e89-10746401cc47" (UID: "20a74a62-29d2-42f7-8e89-10746401cc47"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791543 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791584 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791592 4787 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791601 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791612 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791622 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a74a62-29d2-42f7-8e89-10746401cc47-config\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791630 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791641 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77x2q\" (UniqueName: \"kubernetes.io/projected/93e1e85e-d253-48eb-b0fe-9eb484551076-kube-api-access-77x2q\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791652 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791661 4787 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/93e1e85e-d253-48eb-b0fe-9eb484551076-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:07 crc kubenswrapper[4787]: I0129 13:37:07.791669 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nncgg\" (UniqueName: \"kubernetes.io/projected/20a74a62-29d2-42f7-8e89-10746401cc47-kube-api-access-nncgg\") on node \"crc\" DevicePath \"\""
Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.083765 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-xr7br"]
Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.088159 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67f84f7cd9-xr7br"]
Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.727528 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-56d6b"]
Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.736405 4787 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/keystone-bootstrap-56d6b"] Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.815420 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-l4wkc"] Jan 29 13:37:08 crc kubenswrapper[4787]: E0129 13:37:08.815752 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="dnsmasq-dns" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.815763 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="dnsmasq-dns" Jan 29 13:37:08 crc kubenswrapper[4787]: E0129 13:37:08.815794 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="init" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.815800 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="init" Jan 29 13:37:08 crc kubenswrapper[4787]: E0129 13:37:08.815809 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93e1e85e-d253-48eb-b0fe-9eb484551076" containerName="keystone-bootstrap" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.815815 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="93e1e85e-d253-48eb-b0fe-9eb484551076" containerName="keystone-bootstrap" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.815964 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="93e1e85e-d253-48eb-b0fe-9eb484551076" containerName="keystone-bootstrap" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.815979 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="dnsmasq-dns" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.820297 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.823210 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.823254 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.823420 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k2tqv" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.823666 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.824066 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.836423 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l4wkc"] Jan 29 13:37:08 crc kubenswrapper[4787]: E0129 13:37:08.839178 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:5a548c25fe3d02f7a042cb0a6d28fc8039a34c4a3b3d07aadda4aba3a926e777" Jan 29 13:37:08 crc kubenswrapper[4787]: E0129 13:37:08.839477 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:5a548c25fe3d02f7a042cb0a6d28fc8039a34c4a3b3d07aadda4aba3a926e777,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n658h87h658h576h548h5bch688h645hc9h5fdh5f9h66ch84h668h568h59hd7h699h55dh679h5d5h5d7h585hd6h668h54bhc7h697h658h597h675h56bq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nt7tf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(e13248de-b9b0-4027-bc49-e5a6ea72cf71): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.914291 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-config-data\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.914352 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-combined-ca-bundle\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.914391 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-scripts\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.914542 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-fernet-keys\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.914571 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-credential-keys\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:08 crc kubenswrapper[4787]: I0129 13:37:08.914611 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvcp2\" (UniqueName: \"kubernetes.io/projected/200c9d48-bad8-492c-942c-054c187241eb-kube-api-access-kvcp2\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.016183 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-credential-keys\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.016274 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvcp2\" (UniqueName: \"kubernetes.io/projected/200c9d48-bad8-492c-942c-054c187241eb-kube-api-access-kvcp2\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.016426 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-config-data\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.016499 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-combined-ca-bundle\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.016565 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-scripts\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.016735 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-fernet-keys\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.022387 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-scripts\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.022900 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-config-data\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.024788 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-credential-keys\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.026042 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-fernet-keys\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 
13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.026755 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-combined-ca-bundle\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.041373 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvcp2\" (UniqueName: \"kubernetes.io/projected/200c9d48-bad8-492c-942c-054c187241eb-kube-api-access-kvcp2\") pod \"keystone-bootstrap-l4wkc\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.143508 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.996024 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" path="/var/lib/kubelet/pods/20a74a62-29d2-42f7-8e89-10746401cc47/volumes" Jan 29 13:37:09 crc kubenswrapper[4787]: I0129 13:37:09.996881 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93e1e85e-d253-48eb-b0fe-9eb484551076" path="/var/lib/kubelet/pods/93e1e85e-d253-48eb-b0fe-9eb484551076/volumes" Jan 29 13:37:11 crc kubenswrapper[4787]: I0129 13:37:11.250783 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-67f84f7cd9-xr7br" podUID="20a74a62-29d2-42f7-8e89-10746401cc47" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Jan 29 13:37:18 crc kubenswrapper[4787]: I0129 13:37:18.352703 4787 scope.go:117] "RemoveContainer" containerID="1864f171dd7f0c07209a40d13174a5487a83b584c2d74da37fff67ecf0ae6287" Jan 29 13:37:18 crc kubenswrapper[4787]: E0129 13:37:18.385912 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16" Jan 29 13:37:18 crc kubenswrapper[4787]: E0129 13:37:18.386081 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hh4kj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-p77h8_openstack(63ca8415-8ac5-4c3d-9fca-98a46e8a6da7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:37:18 crc kubenswrapper[4787]: E0129 13:37:18.388417 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-p77h8" podUID="63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" Jan 29 13:37:18 crc kubenswrapper[4787]: I0129 13:37:18.600175 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-vtkd9"] Jan 29 13:37:18 crc kubenswrapper[4787]: E0129 13:37:18.853173 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16\\\"\"" pod="openstack/barbican-db-sync-p77h8" podUID="63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" Jan 29 13:37:20 crc kubenswrapper[4787]: W0129 13:37:20.328807 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9a222e3_4b19_4932_b08b_b7bb8d2edb0b.slice/crio-379f2b74dcb65a95088ed633348c8d73827293656c2488e1e97b7647d02fb2d7 WatchSource:0}: Error finding container 379f2b74dcb65a95088ed633348c8d73827293656c2488e1e97b7647d02fb2d7: Status 404 returned error can't find the container with id 379f2b74dcb65a95088ed633348c8d73827293656c2488e1e97b7647d02fb2d7 Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.344160 4787 scope.go:117] "RemoveContainer" containerID="9dc05ebbcb49bd8a44a284b0f00e382d32ebda038ffb5d7d558bce9be8ee1182" Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.558886 4787 scope.go:117] "RemoveContainer" containerID="1fcf0301a99317b5400daebd2a5909ce51eda77bf4e635536fd1722d8303e744" Jan 29 13:37:20 crc 
kubenswrapper[4787]: I0129 13:37:20.587720 4787 scope.go:117] "RemoveContainer" containerID="05c2118580ea0dff8394d507da41b8d1ebdbbe48f5f715c29961a59400b02996" Jan 29 13:37:20 crc kubenswrapper[4787]: E0129 13:37:20.639368 4787 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49" Jan 29 13:37:20 crc kubenswrapper[4787]: E0129 13:37:20.639532 4787 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ftjdb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-rr4qj_openstack(05d80766-0024-4274-934c-0c6e206e5de0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 29 13:37:20 crc kubenswrapper[4787]: E0129 13:37:20.640935 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-rr4qj" 
podUID="05d80766-0024-4274-934c-0c6e206e5de0" Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.643971 4787 scope.go:117] "RemoveContainer" containerID="5649f376f8260efeb0246306923ee1c4b9714541f81c7b517345bb8344516e81" Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.824576 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-l4wkc"] Jan 29 13:37:20 crc kubenswrapper[4787]: W0129 13:37:20.827541 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod200c9d48_bad8_492c_942c_054c187241eb.slice/crio-c40166b369b25489bed9d12ee3887dc79934cd97c1faf20171ac50c83bbad483 WatchSource:0}: Error finding container c40166b369b25489bed9d12ee3887dc79934cd97c1faf20171ac50c83bbad483: Status 404 returned error can't find the container with id c40166b369b25489bed9d12ee3887dc79934cd97c1faf20171ac50c83bbad483 Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.880060 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l4wkc" event={"ID":"200c9d48-bad8-492c-942c-054c187241eb","Type":"ContainerStarted","Data":"c40166b369b25489bed9d12ee3887dc79934cd97c1faf20171ac50c83bbad483"} Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.882480 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" event={"ID":"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b","Type":"ContainerStarted","Data":"52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332"} Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.882509 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" event={"ID":"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b","Type":"ContainerStarted","Data":"379f2b74dcb65a95088ed633348c8d73827293656c2488e1e97b7647d02fb2d7"} Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.888009 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jp978" event={"ID":"e2d6dbc2-04fe-4797-818d-fb90c0ab7287","Type":"ContainerStarted","Data":"515a9ad605bcfe4d86f0114f11b93eddf063a9c6ad24638ac06c33b9c01f592e"} Jan 29 13:37:20 crc kubenswrapper[4787]: E0129 13:37:20.888518 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49\\\"\"" pod="openstack/cinder-db-sync-rr4qj" podUID="05d80766-0024-4274-934c-0c6e206e5de0" Jan 29 13:37:20 crc kubenswrapper[4787]: I0129 13:37:20.967755 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:37:20 crc kubenswrapper[4787]: W0129 13:37:20.971980 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1d921a8_744d_46fd_b3be_7e79be2532b5.slice/crio-0193a862b13611e39081a3479b67d8a812951dc343d22cfae974ac0a670af57e WatchSource:0}: Error finding container 0193a862b13611e39081a3479b67d8a812951dc343d22cfae974ac0a670af57e: Status 404 returned error can't find the container with id 0193a862b13611e39081a3479b67d8a812951dc343d22cfae974ac0a670af57e Jan 29 13:37:21 crc kubenswrapper[4787]: I0129 13:37:21.896859 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l4wkc" 
event={"ID":"200c9d48-bad8-492c-942c-054c187241eb","Type":"ContainerStarted","Data":"8ce05c87ef36708b8a80fbb2157bdd70fc766fc823b0f2bab968990b9bb3adb4"} Jan 29 13:37:21 crc kubenswrapper[4787]: I0129 13:37:21.899918 4787 generic.go:334] "Generic (PLEG): container finished" podID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerID="52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332" exitCode=0 Jan 29 13:37:21 crc kubenswrapper[4787]: I0129 13:37:21.900012 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" event={"ID":"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b","Type":"ContainerDied","Data":"52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332"} Jan 29 13:37:21 crc kubenswrapper[4787]: I0129 13:37:21.917289 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a1d921a8-744d-46fd-b3be-7e79be2532b5","Type":"ContainerStarted","Data":"be8fd3f509d83fcdfe98d0989d9c77781b4706d3e3e97d9679f5768c5b68ac7d"} Jan 29 13:37:21 crc kubenswrapper[4787]: I0129 13:37:21.917407 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a1d921a8-744d-46fd-b3be-7e79be2532b5","Type":"ContainerStarted","Data":"0193a862b13611e39081a3479b67d8a812951dc343d22cfae974ac0a670af57e"} Jan 29 13:37:21 crc kubenswrapper[4787]: I0129 13:37:21.929883 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-l4wkc" podStartSLOduration=13.929863723 podStartE2EDuration="13.929863723s" podCreationTimestamp="2026-01-29 13:37:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:21.917798948 +0000 UTC m=+1280.679059234" watchObservedRunningTime="2026-01-29 13:37:21.929863723 +0000 UTC m=+1280.691124009" Jan 29 13:37:21 crc kubenswrapper[4787]: I0129 13:37:21.994331 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-jp978" podStartSLOduration=3.94268181 podStartE2EDuration="36.994316128s" podCreationTimestamp="2026-01-29 13:36:45 +0000 UTC" firstStartedPulling="2026-01-29 13:36:47.253581278 +0000 UTC m=+1246.014841554" lastFinishedPulling="2026-01-29 13:37:20.305215576 +0000 UTC m=+1279.066475872" observedRunningTime="2026-01-29 13:37:21.979223886 +0000 UTC m=+1280.740484182" watchObservedRunningTime="2026-01-29 13:37:21.994316128 +0000 UTC m=+1280.755576404" Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 13:37:22.082614 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 13:37:22.929169 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e13248de-b9b0-4027-bc49-e5a6ea72cf71","Type":"ContainerStarted","Data":"d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534"} Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 13:37:22.933296 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" event={"ID":"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b","Type":"ContainerStarted","Data":"257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50"} Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 13:37:22.933525 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 
13:37:22.936290 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a1d921a8-744d-46fd-b3be-7e79be2532b5","Type":"ContainerStarted","Data":"e7a3e3f5e79aa2de71737f592e1a4e16b7041628ae25cf8906f5f5ab0a696d5d"} Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 13:37:22.941079 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2231985a-9fb4-4ada-8d50-f35907760eab","Type":"ContainerStarted","Data":"e9d435265c2eb8d397b6eca29e5c7936a7d76f638d7ae2f619c6266bf81fb41a"} Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 13:37:22.941128 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2231985a-9fb4-4ada-8d50-f35907760eab","Type":"ContainerStarted","Data":"b07080e69232535ee2ff05a09fcc8ab3454b86b0e813890800eeae565d27f262"} Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 13:37:22.957978 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" podStartSLOduration=32.957955842 podStartE2EDuration="32.957955842s" podCreationTimestamp="2026-01-29 13:36:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:22.954444932 +0000 UTC m=+1281.715705208" watchObservedRunningTime="2026-01-29 13:37:22.957955842 +0000 UTC m=+1281.719216138" Jan 29 13:37:22 crc kubenswrapper[4787]: I0129 13:37:22.988911 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=29.988887928 podStartE2EDuration="29.988887928s" podCreationTimestamp="2026-01-29 13:36:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:22.973421235 +0000 UTC m=+1281.734681511" watchObservedRunningTime="2026-01-29 13:37:22.988887928 +0000 UTC m=+1281.750148214" Jan 29 13:37:23 crc kubenswrapper[4787]: I0129 13:37:23.961758 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2231985a-9fb4-4ada-8d50-f35907760eab","Type":"ContainerStarted","Data":"e1b53c5eb86cba39eede1437beceb0141e4f598736c4bedefcaa3544df888e01"} Jan 29 13:37:23 crc kubenswrapper[4787]: I0129 13:37:23.984925 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=30.98490505 podStartE2EDuration="30.98490505s" podCreationTimestamp="2026-01-29 13:36:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:23.981231424 +0000 UTC m=+1282.742491710" watchObservedRunningTime="2026-01-29 13:37:23.98490505 +0000 UTC m=+1282.746165346" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.012115 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.012168 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.012179 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.012191 4787 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.030863 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.055788 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.055840 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.055925 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.055959 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.071793 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.094096 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 13:37:24 crc kubenswrapper[4787]: I0129 13:37:24.102618 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 29 13:37:25 crc kubenswrapper[4787]: I0129 13:37:25.978030 4787 generic.go:334] "Generic (PLEG): container finished" podID="200c9d48-bad8-492c-942c-054c187241eb" containerID="8ce05c87ef36708b8a80fbb2157bdd70fc766fc823b0f2bab968990b9bb3adb4" exitCode=0 Jan 29 13:37:25 crc kubenswrapper[4787]: I0129 13:37:25.978107 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l4wkc" event={"ID":"200c9d48-bad8-492c-942c-054c187241eb","Type":"ContainerDied","Data":"8ce05c87ef36708b8a80fbb2157bdd70fc766fc823b0f2bab968990b9bb3adb4"} Jan 29 13:37:26 crc kubenswrapper[4787]: I0129 13:37:26.991087 4787 generic.go:334] "Generic (PLEG): container finished" podID="e2d6dbc2-04fe-4797-818d-fb90c0ab7287" containerID="515a9ad605bcfe4d86f0114f11b93eddf063a9c6ad24638ac06c33b9c01f592e" exitCode=0 Jan 29 13:37:26 crc kubenswrapper[4787]: I0129 13:37:26.991128 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jp978" event={"ID":"e2d6dbc2-04fe-4797-818d-fb90c0ab7287","Type":"ContainerDied","Data":"515a9ad605bcfe4d86f0114f11b93eddf063a9c6ad24638ac06c33b9c01f592e"} Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.422428 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.583094 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-credential-keys\") pod \"200c9d48-bad8-492c-942c-054c187241eb\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.583138 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-config-data\") pod \"200c9d48-bad8-492c-942c-054c187241eb\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.583181 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-fernet-keys\") pod \"200c9d48-bad8-492c-942c-054c187241eb\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.583248 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvcp2\" (UniqueName: \"kubernetes.io/projected/200c9d48-bad8-492c-942c-054c187241eb-kube-api-access-kvcp2\") pod \"200c9d48-bad8-492c-942c-054c187241eb\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.583325 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-scripts\") pod \"200c9d48-bad8-492c-942c-054c187241eb\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.583415 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-combined-ca-bundle\") pod \"200c9d48-bad8-492c-942c-054c187241eb\" (UID: \"200c9d48-bad8-492c-942c-054c187241eb\") " Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.588174 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "200c9d48-bad8-492c-942c-054c187241eb" (UID: "200c9d48-bad8-492c-942c-054c187241eb"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.590428 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "200c9d48-bad8-492c-942c-054c187241eb" (UID: "200c9d48-bad8-492c-942c-054c187241eb"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.590448 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/200c9d48-bad8-492c-942c-054c187241eb-kube-api-access-kvcp2" (OuterVolumeSpecName: "kube-api-access-kvcp2") pod "200c9d48-bad8-492c-942c-054c187241eb" (UID: "200c9d48-bad8-492c-942c-054c187241eb"). InnerVolumeSpecName "kube-api-access-kvcp2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.593097 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-scripts" (OuterVolumeSpecName: "scripts") pod "200c9d48-bad8-492c-942c-054c187241eb" (UID: "200c9d48-bad8-492c-942c-054c187241eb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.614623 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "200c9d48-bad8-492c-942c-054c187241eb" (UID: "200c9d48-bad8-492c-942c-054c187241eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.617320 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-config-data" (OuterVolumeSpecName: "config-data") pod "200c9d48-bad8-492c-942c-054c187241eb" (UID: "200c9d48-bad8-492c-942c-054c187241eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.685681 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.685719 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.685732 4787 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.685741 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.685751 4787 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/200c9d48-bad8-492c-942c-054c187241eb-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:27 crc kubenswrapper[4787]: I0129 13:37:27.685955 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvcp2\" (UniqueName: \"kubernetes.io/projected/200c9d48-bad8-492c-942c-054c187241eb-kube-api-access-kvcp2\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.005340 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-l4wkc" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.010432 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-l4wkc" event={"ID":"200c9d48-bad8-492c-942c-054c187241eb","Type":"ContainerDied","Data":"c40166b369b25489bed9d12ee3887dc79934cd97c1faf20171ac50c83bbad483"} Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.010835 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c40166b369b25489bed9d12ee3887dc79934cd97c1faf20171ac50c83bbad483" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.010862 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e13248de-b9b0-4027-bc49-e5a6ea72cf71","Type":"ContainerStarted","Data":"1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2"} Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.106880 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-696b9bdfd-pnnmf"] Jan 29 13:37:28 crc kubenswrapper[4787]: E0129 13:37:28.107353 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="200c9d48-bad8-492c-942c-054c187241eb" containerName="keystone-bootstrap" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.107381 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="200c9d48-bad8-492c-942c-054c187241eb" containerName="keystone-bootstrap" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.107637 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="200c9d48-bad8-492c-942c-054c187241eb" containerName="keystone-bootstrap" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.108328 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.110884 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.111213 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.111358 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.111484 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.120802 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.121016 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k2tqv" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.121545 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-696b9bdfd-pnnmf"] Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.264306 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-jp978" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.297698 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-scripts\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.297794 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2k2n\" (UniqueName: \"kubernetes.io/projected/3d1018e7-6cf6-4c3e-b351-6249e795620d-kube-api-access-s2k2n\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.297834 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-config-data\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.297891 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-public-tls-certs\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.297913 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-credential-keys\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.297942 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-combined-ca-bundle\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.298010 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-fernet-keys\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.298042 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-internal-tls-certs\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.394310 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.394384 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.398947 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-logs\") pod \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399067 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-combined-ca-bundle\") pod \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399192 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-config-data\") pod \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399266 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgk45\" (UniqueName: \"kubernetes.io/projected/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-kube-api-access-kgk45\") pod \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399302 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-scripts\") pod \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\" (UID: \"e2d6dbc2-04fe-4797-818d-fb90c0ab7287\") " Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399514 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-logs" (OuterVolumeSpecName: "logs") pod "e2d6dbc2-04fe-4797-818d-fb90c0ab7287" (UID: "e2d6dbc2-04fe-4797-818d-fb90c0ab7287"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399540 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2k2n\" (UniqueName: \"kubernetes.io/projected/3d1018e7-6cf6-4c3e-b351-6249e795620d-kube-api-access-s2k2n\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399579 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-config-data\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399650 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-public-tls-certs\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399678 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-credential-keys\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399711 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-combined-ca-bundle\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399784 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-fernet-keys\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399820 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-internal-tls-certs\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399851 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-scripts\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.399949 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.406468 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-kube-api-access-kgk45" (OuterVolumeSpecName: "kube-api-access-kgk45") pod "e2d6dbc2-04fe-4797-818d-fb90c0ab7287" (UID: "e2d6dbc2-04fe-4797-818d-fb90c0ab7287"). InnerVolumeSpecName "kube-api-access-kgk45". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.408052 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-scripts" (OuterVolumeSpecName: "scripts") pod "e2d6dbc2-04fe-4797-818d-fb90c0ab7287" (UID: "e2d6dbc2-04fe-4797-818d-fb90c0ab7287"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.417056 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-scripts\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.417324 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-combined-ca-bundle\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.418387 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-internal-tls-certs\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.418767 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2k2n\" (UniqueName: \"kubernetes.io/projected/3d1018e7-6cf6-4c3e-b351-6249e795620d-kube-api-access-s2k2n\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.418768 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-public-tls-certs\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.419744 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-fernet-keys\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.431909 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-config-data\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.434196 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-credential-keys\") pod \"keystone-696b9bdfd-pnnmf\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.449212 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.463234 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2d6dbc2-04fe-4797-818d-fb90c0ab7287" (UID: "e2d6dbc2-04fe-4797-818d-fb90c0ab7287"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.489710 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-config-data" (OuterVolumeSpecName: "config-data") pod "e2d6dbc2-04fe-4797-818d-fb90c0ab7287" (UID: "e2d6dbc2-04fe-4797-818d-fb90c0ab7287"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.502541 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.502581 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgk45\" (UniqueName: \"kubernetes.io/projected/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-kube-api-access-kgk45\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.502596 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.502608 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2d6dbc2-04fe-4797-818d-fb90c0ab7287-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:28 crc kubenswrapper[4787]: I0129 13:37:28.729972 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-696b9bdfd-pnnmf"] Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.018740 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-jp978" event={"ID":"e2d6dbc2-04fe-4797-818d-fb90c0ab7287","Type":"ContainerDied","Data":"97d913a18d7c1f54d0adec649c21f5bd7b4d7ba59cc8fbbe1772327703246ba2"} Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.019124 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97d913a18d7c1f54d0adec649c21f5bd7b4d7ba59cc8fbbe1772327703246ba2" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.018783 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-jp978" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.020316 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-696b9bdfd-pnnmf" event={"ID":"3d1018e7-6cf6-4c3e-b351-6249e795620d","Type":"ContainerStarted","Data":"5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b"} Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.020410 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-696b9bdfd-pnnmf" event={"ID":"3d1018e7-6cf6-4c3e-b351-6249e795620d","Type":"ContainerStarted","Data":"cf3d3432082d77bfa118e1d58ea109e9770af02e106143d233074811c87afdc2"} Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.020527 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.063999 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-696b9bdfd-pnnmf" podStartSLOduration=1.063979571 podStartE2EDuration="1.063979571s" podCreationTimestamp="2026-01-29 13:37:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:29.053343306 +0000 UTC m=+1287.814603592" watchObservedRunningTime="2026-01-29 13:37:29.063979571 +0000 UTC m=+1287.825239857" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.365157 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-69d8bc6c98-vmd8w"] Jan 29 13:37:29 crc kubenswrapper[4787]: E0129 13:37:29.365609 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2d6dbc2-04fe-4797-818d-fb90c0ab7287" containerName="placement-db-sync" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.365631 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2d6dbc2-04fe-4797-818d-fb90c0ab7287" containerName="placement-db-sync" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.365836 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2d6dbc2-04fe-4797-818d-fb90c0ab7287" containerName="placement-db-sync" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.367155 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.376348 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.376384 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.376348 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-nz8rd" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.376753 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.377654 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.380159 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-69d8bc6c98-vmd8w"] Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.521973 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eff82d-823f-44a9-b96b-fed35701c54b-logs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.522023 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-scripts\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.522059 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-public-tls-certs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.522100 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-config-data\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.522132 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6554\" (UniqueName: \"kubernetes.io/projected/87eff82d-823f-44a9-b96b-fed35701c54b-kube-api-access-k6554\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.522223 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-combined-ca-bundle\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.522254 4787 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-internal-tls-certs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.623725 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-internal-tls-certs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.624127 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eff82d-823f-44a9-b96b-fed35701c54b-logs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.624165 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-scripts\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.624196 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-public-tls-certs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.624232 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-config-data\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.624265 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6554\" (UniqueName: \"kubernetes.io/projected/87eff82d-823f-44a9-b96b-fed35701c54b-kube-api-access-k6554\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.624344 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-combined-ca-bundle\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.625856 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eff82d-823f-44a9-b96b-fed35701c54b-logs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.629960 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-internal-tls-certs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.630186 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-combined-ca-bundle\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.630318 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-scripts\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.630714 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-public-tls-certs\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.640515 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-config-data\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.647370 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6554\" (UniqueName: \"kubernetes.io/projected/87eff82d-823f-44a9-b96b-fed35701c54b-kube-api-access-k6554\") pod \"placement-69d8bc6c98-vmd8w\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:29 crc kubenswrapper[4787]: I0129 13:37:29.686942 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:30 crc kubenswrapper[4787]: I0129 13:37:30.116444 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-69d8bc6c98-vmd8w"] Jan 29 13:37:30 crc kubenswrapper[4787]: W0129 13:37:30.125019 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87eff82d_823f_44a9_b96b_fed35701c54b.slice/crio-d1e6cbe8ec4f7d4811522738b72c5fbbc76a769496c56c7335ad72ba279a33ad WatchSource:0}: Error finding container d1e6cbe8ec4f7d4811522738b72c5fbbc76a769496c56c7335ad72ba279a33ad: Status 404 returned error can't find the container with id d1e6cbe8ec4f7d4811522738b72c5fbbc76a769496c56c7335ad72ba279a33ad Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.047981 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-69d8bc6c98-vmd8w" event={"ID":"87eff82d-823f-44a9-b96b-fed35701c54b","Type":"ContainerStarted","Data":"1f0d6877829ebcf7be918239787102e0a2f16c103fadf03c565be16af5f1f03a"} Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.048394 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.048412 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.048423 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-69d8bc6c98-vmd8w" event={"ID":"87eff82d-823f-44a9-b96b-fed35701c54b","Type":"ContainerStarted","Data":"7fa1b78bdd06010ec0e648c7dc942c45ba46fb8d183d57540ee5aece3a17a14d"} Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.048436 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-69d8bc6c98-vmd8w" event={"ID":"87eff82d-823f-44a9-b96b-fed35701c54b","Type":"ContainerStarted","Data":"d1e6cbe8ec4f7d4811522738b72c5fbbc76a769496c56c7335ad72ba279a33ad"} Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.078362 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-69d8bc6c98-vmd8w" podStartSLOduration=2.078339862 podStartE2EDuration="2.078339862s" podCreationTimestamp="2026-01-29 13:37:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:31.078299931 +0000 UTC m=+1289.839560247" watchObservedRunningTime="2026-01-29 13:37:31.078339862 +0000 UTC m=+1289.839600148" Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.363625 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.435602 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-tln9h"] Jan 29 13:37:31 crc kubenswrapper[4787]: I0129 13:37:31.435896 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" podUID="0d533171-7cfb-4ab5-9fca-d294ee78d912" containerName="dnsmasq-dns" containerID="cri-o://fed86be861d29e307074c02d4e5f6a4a5e8ebf89bc7bc4d49f92042fa6e9ec31" gracePeriod=10 Jan 29 13:37:32 crc kubenswrapper[4787]: I0129 13:37:32.058136 4787 generic.go:334] "Generic (PLEG): container finished" podID="0d533171-7cfb-4ab5-9fca-d294ee78d912" 
containerID="fed86be861d29e307074c02d4e5f6a4a5e8ebf89bc7bc4d49f92042fa6e9ec31" exitCode=0 Jan 29 13:37:32 crc kubenswrapper[4787]: I0129 13:37:32.058223 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" event={"ID":"0d533171-7cfb-4ab5-9fca-d294ee78d912","Type":"ContainerDied","Data":"fed86be861d29e307074c02d4e5f6a4a5e8ebf89bc7bc4d49f92042fa6e9ec31"} Jan 29 13:37:33 crc kubenswrapper[4787]: I0129 13:37:33.977795 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.081552 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" event={"ID":"0d533171-7cfb-4ab5-9fca-d294ee78d912","Type":"ContainerDied","Data":"6deec4370807100a320ad726300ce380e0353ef908329c292cd83a9ebbb3779d"} Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.081739 4787 scope.go:117] "RemoveContainer" containerID="fed86be861d29e307074c02d4e5f6a4a5e8ebf89bc7bc4d49f92042fa6e9ec31" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.081614 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.128116 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-nb\") pod \"0d533171-7cfb-4ab5-9fca-d294ee78d912\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.128193 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-config\") pod \"0d533171-7cfb-4ab5-9fca-d294ee78d912\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.128238 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-dns-svc\") pod \"0d533171-7cfb-4ab5-9fca-d294ee78d912\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.128268 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4xdj\" (UniqueName: \"kubernetes.io/projected/0d533171-7cfb-4ab5-9fca-d294ee78d912-kube-api-access-f4xdj\") pod \"0d533171-7cfb-4ab5-9fca-d294ee78d912\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.128346 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-sb\") pod \"0d533171-7cfb-4ab5-9fca-d294ee78d912\" (UID: \"0d533171-7cfb-4ab5-9fca-d294ee78d912\") " Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.134093 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d533171-7cfb-4ab5-9fca-d294ee78d912-kube-api-access-f4xdj" (OuterVolumeSpecName: "kube-api-access-f4xdj") pod "0d533171-7cfb-4ab5-9fca-d294ee78d912" (UID: "0d533171-7cfb-4ab5-9fca-d294ee78d912"). InnerVolumeSpecName "kube-api-access-f4xdj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.173542 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0d533171-7cfb-4ab5-9fca-d294ee78d912" (UID: "0d533171-7cfb-4ab5-9fca-d294ee78d912"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.176936 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-config" (OuterVolumeSpecName: "config") pod "0d533171-7cfb-4ab5-9fca-d294ee78d912" (UID: "0d533171-7cfb-4ab5-9fca-d294ee78d912"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.192493 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0d533171-7cfb-4ab5-9fca-d294ee78d912" (UID: "0d533171-7cfb-4ab5-9fca-d294ee78d912"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.195041 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0d533171-7cfb-4ab5-9fca-d294ee78d912" (UID: "0d533171-7cfb-4ab5-9fca-d294ee78d912"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.230869 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.230905 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.230913 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.230923 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4xdj\" (UniqueName: \"kubernetes.io/projected/0d533171-7cfb-4ab5-9fca-d294ee78d912-kube-api-access-f4xdj\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.230935 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d533171-7cfb-4ab5-9fca-d294ee78d912-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.422972 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-tln9h"] Jan 29 13:37:34 crc kubenswrapper[4787]: I0129 13:37:34.432944 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bfd654465-tln9h"] Jan 29 13:37:36 crc kubenswrapper[4787]: I0129 13:37:36.008152 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="0d533171-7cfb-4ab5-9fca-d294ee78d912" path="/var/lib/kubelet/pods/0d533171-7cfb-4ab5-9fca-d294ee78d912/volumes" Jan 29 13:37:37 crc kubenswrapper[4787]: I0129 13:37:37.565078 4787 scope.go:117] "RemoveContainer" containerID="9f6b8c6119bbd9dfde36a5c6f71c3c39ab9475d0f24d64b2fc31e63d3b138c09" Jan 29 13:37:38 crc kubenswrapper[4787]: E0129 13:37:38.096063 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" Jan 29 13:37:38 crc kubenswrapper[4787]: I0129 13:37:38.125271 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p77h8" event={"ID":"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7","Type":"ContainerStarted","Data":"dc7044db9f24464b95289a083efb431b8b4cd106cf268091236b9976bf84b435"} Jan 29 13:37:38 crc kubenswrapper[4787]: I0129 13:37:38.130899 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e13248de-b9b0-4027-bc49-e5a6ea72cf71","Type":"ContainerStarted","Data":"302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937"} Jan 29 13:37:38 crc kubenswrapper[4787]: I0129 13:37:38.131072 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="ceilometer-notification-agent" containerID="cri-o://d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534" gracePeriod=30 Jan 29 13:37:38 crc kubenswrapper[4787]: I0129 13:37:38.131175 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 13:37:38 crc kubenswrapper[4787]: I0129 13:37:38.131233 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="proxy-httpd" containerID="cri-o://302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937" gracePeriod=30 Jan 29 13:37:38 crc kubenswrapper[4787]: I0129 13:37:38.131285 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="sg-core" containerID="cri-o://1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2" gracePeriod=30 Jan 29 13:37:38 crc kubenswrapper[4787]: I0129 13:37:38.144812 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-p77h8" podStartSLOduration=2.575180104 podStartE2EDuration="53.144790332s" podCreationTimestamp="2026-01-29 13:36:45 +0000 UTC" firstStartedPulling="2026-01-29 13:36:47.256868142 +0000 UTC m=+1246.018128418" lastFinishedPulling="2026-01-29 13:37:37.82647833 +0000 UTC m=+1296.587738646" observedRunningTime="2026-01-29 13:37:38.138358118 +0000 UTC m=+1296.899618424" watchObservedRunningTime="2026-01-29 13:37:38.144790332 +0000 UTC m=+1296.906050608" Jan 29 13:37:38 crc kubenswrapper[4787]: I0129 13:37:38.900589 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6bfd654465-tln9h" podUID="0d533171-7cfb-4ab5-9fca-d294ee78d912" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: i/o timeout" Jan 29 13:37:39 crc kubenswrapper[4787]: I0129 13:37:39.139930 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-db-sync-rr4qj" event={"ID":"05d80766-0024-4274-934c-0c6e206e5de0","Type":"ContainerStarted","Data":"a9a6511c15870cb7985525c82b23681be5038d9f50adfd768e90990683aa7089"} Jan 29 13:37:39 crc kubenswrapper[4787]: I0129 13:37:39.141574 4787 generic.go:334] "Generic (PLEG): container finished" podID="62924dc0-4190-4229-a277-6a3a1f775498" containerID="e1b6628372053d9ce300b29e161a3428b69bea52b970a5b5a24b898c530045d1" exitCode=0 Jan 29 13:37:39 crc kubenswrapper[4787]: I0129 13:37:39.141656 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-j2hxl" event={"ID":"62924dc0-4190-4229-a277-6a3a1f775498","Type":"ContainerDied","Data":"e1b6628372053d9ce300b29e161a3428b69bea52b970a5b5a24b898c530045d1"} Jan 29 13:37:39 crc kubenswrapper[4787]: I0129 13:37:39.143832 4787 generic.go:334] "Generic (PLEG): container finished" podID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerID="1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2" exitCode=2 Jan 29 13:37:39 crc kubenswrapper[4787]: I0129 13:37:39.143866 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e13248de-b9b0-4027-bc49-e5a6ea72cf71","Type":"ContainerDied","Data":"1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2"} Jan 29 13:37:39 crc kubenswrapper[4787]: I0129 13:37:39.196181 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-rr4qj" podStartSLOduration=3.643552632 podStartE2EDuration="54.196140832s" podCreationTimestamp="2026-01-29 13:36:45 +0000 UTC" firstStartedPulling="2026-01-29 13:36:47.271966595 +0000 UTC m=+1246.033226881" lastFinishedPulling="2026-01-29 13:37:37.824554795 +0000 UTC m=+1296.585815081" observedRunningTime="2026-01-29 13:37:39.173205988 +0000 UTC m=+1297.934466314" watchObservedRunningTime="2026-01-29 13:37:39.196140832 +0000 UTC m=+1297.957401158" Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.161629 4787 generic.go:334] "Generic (PLEG): container finished" podID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerID="d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534" exitCode=0 Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.161780 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e13248de-b9b0-4027-bc49-e5a6ea72cf71","Type":"ContainerDied","Data":"d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534"} Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.469071 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.541528 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pjd8\" (UniqueName: \"kubernetes.io/projected/62924dc0-4190-4229-a277-6a3a1f775498-kube-api-access-2pjd8\") pod \"62924dc0-4190-4229-a277-6a3a1f775498\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.541642 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-config\") pod \"62924dc0-4190-4229-a277-6a3a1f775498\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.541723 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-combined-ca-bundle\") pod \"62924dc0-4190-4229-a277-6a3a1f775498\" (UID: \"62924dc0-4190-4229-a277-6a3a1f775498\") " Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.547071 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62924dc0-4190-4229-a277-6a3a1f775498-kube-api-access-2pjd8" (OuterVolumeSpecName: "kube-api-access-2pjd8") pod "62924dc0-4190-4229-a277-6a3a1f775498" (UID: "62924dc0-4190-4229-a277-6a3a1f775498"). InnerVolumeSpecName "kube-api-access-2pjd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.566447 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-config" (OuterVolumeSpecName: "config") pod "62924dc0-4190-4229-a277-6a3a1f775498" (UID: "62924dc0-4190-4229-a277-6a3a1f775498"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.570788 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62924dc0-4190-4229-a277-6a3a1f775498" (UID: "62924dc0-4190-4229-a277-6a3a1f775498"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.644315 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pjd8\" (UniqueName: \"kubernetes.io/projected/62924dc0-4190-4229-a277-6a3a1f775498-kube-api-access-2pjd8\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.644579 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:40 crc kubenswrapper[4787]: I0129 13:37:40.644655 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62924dc0-4190-4229-a277-6a3a1f775498-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.171744 4787 generic.go:334] "Generic (PLEG): container finished" podID="63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" containerID="dc7044db9f24464b95289a083efb431b8b4cd106cf268091236b9976bf84b435" exitCode=0 Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.172097 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p77h8" event={"ID":"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7","Type":"ContainerDied","Data":"dc7044db9f24464b95289a083efb431b8b4cd106cf268091236b9976bf84b435"} Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.173365 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-j2hxl" event={"ID":"62924dc0-4190-4229-a277-6a3a1f775498","Type":"ContainerDied","Data":"b985cec9b436948dc26dd5d36598aa30aa2d3582b1e7458961eeb10772d01901"} Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.173389 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b985cec9b436948dc26dd5d36598aa30aa2d3582b1e7458961eeb10772d01901" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.173440 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-j2hxl" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.441153 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-g2mrb"] Jan 29 13:37:41 crc kubenswrapper[4787]: E0129 13:37:41.441838 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d533171-7cfb-4ab5-9fca-d294ee78d912" containerName="init" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.441926 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d533171-7cfb-4ab5-9fca-d294ee78d912" containerName="init" Jan 29 13:37:41 crc kubenswrapper[4787]: E0129 13:37:41.442064 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d533171-7cfb-4ab5-9fca-d294ee78d912" containerName="dnsmasq-dns" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.442142 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d533171-7cfb-4ab5-9fca-d294ee78d912" containerName="dnsmasq-dns" Jan 29 13:37:41 crc kubenswrapper[4787]: E0129 13:37:41.442205 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62924dc0-4190-4229-a277-6a3a1f775498" containerName="neutron-db-sync" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.442280 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="62924dc0-4190-4229-a277-6a3a1f775498" containerName="neutron-db-sync" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.442596 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="62924dc0-4190-4229-a277-6a3a1f775498" containerName="neutron-db-sync" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.442702 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d533171-7cfb-4ab5-9fca-d294ee78d912" containerName="dnsmasq-dns" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.443884 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.464167 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-g2mrb"] Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.513019 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-587b8b9bbb-dm8wt"] Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.514350 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.518039 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-6gpn9" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.519028 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.519201 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.519327 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.526736 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-587b8b9bbb-dm8wt"] Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.565371 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.565425 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dl7lt\" (UniqueName: \"kubernetes.io/projected/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-kube-api-access-dl7lt\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.565509 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-config\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.565559 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.565581 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.565619 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667422 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-httpd-config\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667566 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667629 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-combined-ca-bundle\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667656 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667761 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44rrm\" (UniqueName: \"kubernetes.io/projected/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-kube-api-access-44rrm\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667815 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dl7lt\" (UniqueName: \"kubernetes.io/projected/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-kube-api-access-dl7lt\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667862 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-config\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667936 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-ovndb-tls-certs\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.667965 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-config\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.668022 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.668070 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.668553 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-config\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.668560 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-nb\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.668848 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-sb\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.668955 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-swift-storage-0\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.669353 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-svc\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.686838 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dl7lt\" (UniqueName: \"kubernetes.io/projected/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-kube-api-access-dl7lt\") pod \"dnsmasq-dns-7bb67c87c9-g2mrb\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.769027 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.769609 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-ovndb-tls-certs\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.769726 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-httpd-config\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.769822 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-combined-ca-bundle\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.769862 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44rrm\" (UniqueName: \"kubernetes.io/projected/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-kube-api-access-44rrm\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.769898 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-config\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.773388 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-ovndb-tls-certs\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.774216 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-config\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.775107 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-combined-ca-bundle\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.781670 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-httpd-config\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.789566 4787 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-44rrm\" (UniqueName: \"kubernetes.io/projected/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-kube-api-access-44rrm\") pod \"neutron-587b8b9bbb-dm8wt\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:41 crc kubenswrapper[4787]: I0129 13:37:41.832443 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.374079 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-g2mrb"] Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.503520 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-p77h8" Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.586795 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-combined-ca-bundle\") pod \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.586847 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hh4kj\" (UniqueName: \"kubernetes.io/projected/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-kube-api-access-hh4kj\") pod \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.586898 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-db-sync-config-data\") pod \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\" (UID: \"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7\") " Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.602674 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" (UID: "63ca8415-8ac5-4c3d-9fca-98a46e8a6da7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.602895 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-kube-api-access-hh4kj" (OuterVolumeSpecName: "kube-api-access-hh4kj") pod "63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" (UID: "63ca8415-8ac5-4c3d-9fca-98a46e8a6da7"). InnerVolumeSpecName "kube-api-access-hh4kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.635009 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" (UID: "63ca8415-8ac5-4c3d-9fca-98a46e8a6da7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.651618 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-587b8b9bbb-dm8wt"] Jan 29 13:37:42 crc kubenswrapper[4787]: W0129 13:37:42.662376 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec4db889_9f8b_40e1_9127_63a9fb91dc1b.slice/crio-ac240acb6aee7ff1f22cc9881204b13599dadfea4457721cf219d68e8eff9415 WatchSource:0}: Error finding container ac240acb6aee7ff1f22cc9881204b13599dadfea4457721cf219d68e8eff9415: Status 404 returned error can't find the container with id ac240acb6aee7ff1f22cc9881204b13599dadfea4457721cf219d68e8eff9415 Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.688316 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.688353 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hh4kj\" (UniqueName: \"kubernetes.io/projected/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-kube-api-access-hh4kj\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:42 crc kubenswrapper[4787]: I0129 13:37:42.688367 4787 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.191123 4787 generic.go:334] "Generic (PLEG): container finished" podID="05d80766-0024-4274-934c-0c6e206e5de0" containerID="a9a6511c15870cb7985525c82b23681be5038d9f50adfd768e90990683aa7089" exitCode=0 Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.191257 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-rr4qj" event={"ID":"05d80766-0024-4274-934c-0c6e206e5de0","Type":"ContainerDied","Data":"a9a6511c15870cb7985525c82b23681be5038d9f50adfd768e90990683aa7089"} Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.194928 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p77h8" event={"ID":"63ca8415-8ac5-4c3d-9fca-98a46e8a6da7","Type":"ContainerDied","Data":"7f4af4e42eeb109c266490326e09eed4c4ea7277d5c4aca995590309f21757c4"} Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.194968 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f4af4e42eeb109c266490326e09eed4c4ea7277d5c4aca995590309f21757c4" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.195281 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-p77h8" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.202675 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-587b8b9bbb-dm8wt" event={"ID":"ec4db889-9f8b-40e1-9127-63a9fb91dc1b","Type":"ContainerStarted","Data":"f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11"} Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.202712 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-587b8b9bbb-dm8wt" event={"ID":"ec4db889-9f8b-40e1-9127-63a9fb91dc1b","Type":"ContainerStarted","Data":"ac240acb6aee7ff1f22cc9881204b13599dadfea4457721cf219d68e8eff9415"} Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.204387 4787 generic.go:334] "Generic (PLEG): container finished" podID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" containerID="9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67" exitCode=0 Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.204421 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" event={"ID":"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e","Type":"ContainerDied","Data":"9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67"} Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.204448 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" event={"ID":"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e","Type":"ContainerStarted","Data":"b79759106bd07818b057b03bd775ca3e6e4b5dce7b8539a8d6bdd7d38bee1214"} Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.461497 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6f66c4d958-z5ntb"] Jan 29 13:37:43 crc kubenswrapper[4787]: E0129 13:37:43.461951 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" containerName="barbican-db-sync" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.461965 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" containerName="barbican-db-sync" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.462160 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" containerName="barbican-db-sync" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.463098 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.465965 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-rjz4t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.466153 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.472936 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.479202 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-64fc7f548f-h8fjw"] Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.480933 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.485333 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.495472 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6f66c4d958-z5ntb"] Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.517187 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-64fc7f548f-h8fjw"] Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.563300 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-g2mrb"] Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.602024 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-jgw6j"] Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.603729 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609400 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-combined-ca-bundle\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609471 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffn97\" (UniqueName: \"kubernetes.io/projected/00f5493b-e570-4684-b7ae-9af7154b3e51-kube-api-access-ffn97\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609519 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e48c5bf-c285-446e-a91e-fe216f819f05-logs\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609543 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data-custom\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609575 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data-custom\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609593 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-combined-ca-bundle\") pod 
\"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609643 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccz8k\" (UniqueName: \"kubernetes.io/projected/0e48c5bf-c285-446e-a91e-fe216f819f05-kube-api-access-ccz8k\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609666 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609686 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.609708 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f5493b-e570-4684-b7ae-9af7154b3e51-logs\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.621242 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-jgw6j"] Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.662720 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-d75dcc4b8-m2d8t"] Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.664007 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.673075 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.689554 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d75dcc4b8-m2d8t"] Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711343 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffn97\" (UniqueName: \"kubernetes.io/projected/00f5493b-e570-4684-b7ae-9af7154b3e51-kube-api-access-ffn97\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711402 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4v66\" (UniqueName: \"kubernetes.io/projected/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-kube-api-access-m4v66\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711428 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e48c5bf-c285-446e-a91e-fe216f819f05-logs\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711448 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data-custom\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711508 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data-custom\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711527 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-combined-ca-bundle\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711553 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711578 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-nb\") 
pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711620 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccz8k\" (UniqueName: \"kubernetes.io/projected/0e48c5bf-c285-446e-a91e-fe216f819f05-kube-api-access-ccz8k\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711638 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711660 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711680 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711701 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-config\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711720 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f5493b-e570-4684-b7ae-9af7154b3e51-logs\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711745 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.711766 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-combined-ca-bundle\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.716975 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/0e48c5bf-c285-446e-a91e-fe216f819f05-logs\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.717050 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f5493b-e570-4684-b7ae-9af7154b3e51-logs\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.721701 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-combined-ca-bundle\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.723075 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data-custom\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.727341 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.727400 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.729602 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-combined-ca-bundle\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.736138 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data-custom\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.767397 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccz8k\" (UniqueName: \"kubernetes.io/projected/0e48c5bf-c285-446e-a91e-fe216f819f05-kube-api-access-ccz8k\") pod \"barbican-worker-64fc7f548f-h8fjw\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.771025 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-ffn97\" (UniqueName: \"kubernetes.io/projected/00f5493b-e570-4684-b7ae-9af7154b3e51-kube-api-access-ffn97\") pod \"barbican-keystone-listener-6f66c4d958-z5ntb\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.796067 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813173 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813233 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-nb\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813275 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-combined-ca-bundle\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813308 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813334 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-logs\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813354 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813371 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-config\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813402 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: 
\"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813423 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data-custom\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813495 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4v66\" (UniqueName: \"kubernetes.io/projected/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-kube-api-access-m4v66\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813528 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9lnt\" (UniqueName: \"kubernetes.io/projected/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-kube-api-access-t9lnt\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.813887 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-svc\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.814286 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-nb\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.814617 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-swift-storage-0\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.814993 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-config\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.815355 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-sb\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.817686 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.833143 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4v66\" (UniqueName: \"kubernetes.io/projected/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-kube-api-access-m4v66\") pod \"dnsmasq-dns-54c4dfcffc-jgw6j\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.915310 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.915688 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data-custom\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.915777 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9lnt\" (UniqueName: \"kubernetes.io/projected/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-kube-api-access-t9lnt\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.916352 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-combined-ca-bundle\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.916402 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-logs\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.916704 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-logs\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.922250 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data-custom\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.922271 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc 
kubenswrapper[4787]: I0129 13:37:43.929283 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-combined-ca-bundle\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.943165 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:43 crc kubenswrapper[4787]: I0129 13:37:43.947092 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9lnt\" (UniqueName: \"kubernetes.io/projected/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-kube-api-access-t9lnt\") pod \"barbican-api-d75dcc4b8-m2d8t\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.199057 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.209302 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-85444c4b89-hx4zl"] Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.222893 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.226872 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.227061 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.239216 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-587b8b9bbb-dm8wt" event={"ID":"ec4db889-9f8b-40e1-9127-63a9fb91dc1b","Type":"ContainerStarted","Data":"b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea"} Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.239311 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.241069 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" podUID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" containerName="dnsmasq-dns" containerID="cri-o://509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e" gracePeriod=10 Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.241394 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" event={"ID":"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e","Type":"ContainerStarted","Data":"509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e"} Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.241433 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.250805 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85444c4b89-hx4zl"] Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.304691 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-587b8b9bbb-dm8wt" podStartSLOduration=3.304652626 podStartE2EDuration="3.304652626s" 
podCreationTimestamp="2026-01-29 13:37:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:44.289795121 +0000 UTC m=+1303.051055397" watchObservedRunningTime="2026-01-29 13:37:44.304652626 +0000 UTC m=+1303.065912902" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.305660 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" podStartSLOduration=3.305653622 podStartE2EDuration="3.305653622s" podCreationTimestamp="2026-01-29 13:37:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:44.268033068 +0000 UTC m=+1303.029293344" watchObservedRunningTime="2026-01-29 13:37:44.305653622 +0000 UTC m=+1303.066913898" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.316262 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-64fc7f548f-h8fjw"] Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.323638 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-internal-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.323680 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctmzh\" (UniqueName: \"kubernetes.io/projected/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-kube-api-access-ctmzh\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.323754 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-ovndb-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.323771 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-config\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.323799 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-public-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.323815 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-httpd-config\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.323831 4787 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-combined-ca-bundle\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.356141 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6f66c4d958-z5ntb"] Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.428595 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-ovndb-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.428980 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-config\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.429022 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-public-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.429046 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-httpd-config\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.429071 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-combined-ca-bundle\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.429202 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-internal-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.429228 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctmzh\" (UniqueName: \"kubernetes.io/projected/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-kube-api-access-ctmzh\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.443402 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-ovndb-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc 
kubenswrapper[4787]: I0129 13:37:44.444691 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-config\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.444762 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-public-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.445175 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-internal-tls-certs\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.445352 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-httpd-config\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.446216 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-combined-ca-bundle\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.449312 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctmzh\" (UniqueName: \"kubernetes.io/projected/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-kube-api-access-ctmzh\") pod \"neutron-85444c4b89-hx4zl\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.454179 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-jgw6j"] Jan 29 13:37:44 crc kubenswrapper[4787]: W0129 13:37:44.475619 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9db31e9c_3b01_4b05_99b0_06ab2bdbd69f.slice/crio-2662d752c425d3ba9e86890f04b0885b96d02a8e09bb8d67d692384d3bee14cc WatchSource:0}: Error finding container 2662d752c425d3ba9e86890f04b0885b96d02a8e09bb8d67d692384d3bee14cc: Status 404 returned error can't find the container with id 2662d752c425d3ba9e86890f04b0885b96d02a8e09bb8d67d692384d3bee14cc Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.551878 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.750816 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d75dcc4b8-m2d8t"] Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.962631 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:37:44 crc kubenswrapper[4787]: I0129 13:37:44.966654 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048621 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-sb\") pod \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048679 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dl7lt\" (UniqueName: \"kubernetes.io/projected/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-kube-api-access-dl7lt\") pod \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048709 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05d80766-0024-4274-934c-0c6e206e5de0-etc-machine-id\") pod \"05d80766-0024-4274-934c-0c6e206e5de0\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048767 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-scripts\") pod \"05d80766-0024-4274-934c-0c6e206e5de0\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048798 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-db-sync-config-data\") pod \"05d80766-0024-4274-934c-0c6e206e5de0\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048826 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-nb\") pod \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048862 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-config\") pod \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048908 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-swift-storage-0\") pod \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048928 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-svc\") pod \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\" (UID: \"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048946 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-config-data\") pod \"05d80766-0024-4274-934c-0c6e206e5de0\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.048985 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-combined-ca-bundle\") pod \"05d80766-0024-4274-934c-0c6e206e5de0\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.049005 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftjdb\" (UniqueName: \"kubernetes.io/projected/05d80766-0024-4274-934c-0c6e206e5de0-kube-api-access-ftjdb\") pod \"05d80766-0024-4274-934c-0c6e206e5de0\" (UID: \"05d80766-0024-4274-934c-0c6e206e5de0\") " Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.057020 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05d80766-0024-4274-934c-0c6e206e5de0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "05d80766-0024-4274-934c-0c6e206e5de0" (UID: "05d80766-0024-4274-934c-0c6e206e5de0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.059751 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-kube-api-access-dl7lt" (OuterVolumeSpecName: "kube-api-access-dl7lt") pod "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" (UID: "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e"). InnerVolumeSpecName "kube-api-access-dl7lt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.061396 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "05d80766-0024-4274-934c-0c6e206e5de0" (UID: "05d80766-0024-4274-934c-0c6e206e5de0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.062883 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05d80766-0024-4274-934c-0c6e206e5de0-kube-api-access-ftjdb" (OuterVolumeSpecName: "kube-api-access-ftjdb") pod "05d80766-0024-4274-934c-0c6e206e5de0" (UID: "05d80766-0024-4274-934c-0c6e206e5de0"). InnerVolumeSpecName "kube-api-access-ftjdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.067298 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-scripts" (OuterVolumeSpecName: "scripts") pod "05d80766-0024-4274-934c-0c6e206e5de0" (UID: "05d80766-0024-4274-934c-0c6e206e5de0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.118773 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" (UID: "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e"). 
InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.123953 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" (UID: "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.136771 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" (UID: "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.136766 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" (UID: "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.146860 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-config" (OuterVolumeSpecName: "config") pod "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" (UID: "d0f253d3-1cba-4b6d-aedd-54d2a8a1821e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151078 4787 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151114 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151127 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftjdb\" (UniqueName: \"kubernetes.io/projected/05d80766-0024-4274-934c-0c6e206e5de0-kube-api-access-ftjdb\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151140 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151151 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dl7lt\" (UniqueName: \"kubernetes.io/projected/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-kube-api-access-dl7lt\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151161 4787 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/05d80766-0024-4274-934c-0c6e206e5de0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151172 4787 reconciler_common.go:293] 
"Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151182 4787 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151192 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.151202 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.153950 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05d80766-0024-4274-934c-0c6e206e5de0" (UID: "05d80766-0024-4274-934c-0c6e206e5de0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.156053 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-config-data" (OuterVolumeSpecName: "config-data") pod "05d80766-0024-4274-934c-0c6e206e5de0" (UID: "05d80766-0024-4274-934c-0c6e206e5de0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.224615 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85444c4b89-hx4zl"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.254581 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.254604 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05d80766-0024-4274-934c-0c6e206e5de0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.264131 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-64fc7f548f-h8fjw" event={"ID":"0e48c5bf-c285-446e-a91e-fe216f819f05","Type":"ContainerStarted","Data":"35dde23ebeadb65cd16425b28f0ca3da7f087aac2a6397d4362de506f9ab09c9"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.279530 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-rr4qj" event={"ID":"05d80766-0024-4274-934c-0c6e206e5de0","Type":"ContainerDied","Data":"059e835e2c1db4218f2abd662c77102803437c86e42932771c4c48be1ea43c63"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.279576 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="059e835e2c1db4218f2abd662c77102803437c86e42932771c4c48be1ea43c63" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.279646 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-rr4qj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.282489 4787 generic.go:334] "Generic (PLEG): container finished" podID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" containerID="11b44a359668a2d3b56bcaa9ef64d66e6f3aaa9fbfac978b14bdca0fd4d7f108" exitCode=0 Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.282667 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" event={"ID":"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f","Type":"ContainerDied","Data":"11b44a359668a2d3b56bcaa9ef64d66e6f3aaa9fbfac978b14bdca0fd4d7f108"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.284643 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" event={"ID":"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f","Type":"ContainerStarted","Data":"2662d752c425d3ba9e86890f04b0885b96d02a8e09bb8d67d692384d3bee14cc"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.290280 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85444c4b89-hx4zl" event={"ID":"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced","Type":"ContainerStarted","Data":"7f6eb5dee970a1f1d2da58a5bd57fdef17f9e5b8a48de2e6d1bb14db37ea8e2e"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.293909 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d75dcc4b8-m2d8t" event={"ID":"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5","Type":"ContainerStarted","Data":"17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.294269 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d75dcc4b8-m2d8t" event={"ID":"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5","Type":"ContainerStarted","Data":"981f9fc060eda2538ea1ad43ada1ca92afd841eb1a3a3377163e7b67e1e900f0"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.298386 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" event={"ID":"00f5493b-e570-4684-b7ae-9af7154b3e51","Type":"ContainerStarted","Data":"0ee4250ebe7ac6d2e09a58e53f3ae655d528d7e62f8962ddf297f31e95429afd"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.318444 4787 generic.go:334] "Generic (PLEG): container finished" podID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" containerID="509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e" exitCode=0 Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.319008 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.319835 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" event={"ID":"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e","Type":"ContainerDied","Data":"509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.319900 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bb67c87c9-g2mrb" event={"ID":"d0f253d3-1cba-4b6d-aedd-54d2a8a1821e","Type":"ContainerDied","Data":"b79759106bd07818b057b03bd775ca3e6e4b5dce7b8539a8d6bdd7d38bee1214"} Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.319922 4787 scope.go:117] "RemoveContainer" containerID="509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.383618 4787 scope.go:117] "RemoveContainer" containerID="9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.403216 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-g2mrb"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.412039 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bb67c87c9-g2mrb"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.420034 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:37:45 crc kubenswrapper[4787]: E0129 13:37:45.420456 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d80766-0024-4274-934c-0c6e206e5de0" containerName="cinder-db-sync" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.420527 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d80766-0024-4274-934c-0c6e206e5de0" containerName="cinder-db-sync" Jan 29 13:37:45 crc kubenswrapper[4787]: E0129 13:37:45.420541 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" containerName="dnsmasq-dns" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.420549 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" containerName="dnsmasq-dns" Jan 29 13:37:45 crc kubenswrapper[4787]: E0129 13:37:45.420573 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" containerName="init" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.420579 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" containerName="init" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.420829 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d80766-0024-4274-934c-0c6e206e5de0" containerName="cinder-db-sync" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.420862 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" containerName="dnsmasq-dns" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.422320 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.451718 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-2ltwr" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.452099 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.452357 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.457099 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.475513 4787 scope.go:117] "RemoveContainer" containerID="509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e" Jan 29 13:37:45 crc kubenswrapper[4787]: E0129 13:37:45.476836 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e\": container with ID starting with 509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e not found: ID does not exist" containerID="509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.476875 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e"} err="failed to get container status \"509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e\": rpc error: code = NotFound desc = could not find container \"509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e\": container with ID starting with 509a5a72e383f1a11bf7babb2beef2a95918ac29f4cc2a591c9816e7298fdb0e not found: ID does not exist" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.476902 4787 scope.go:117] "RemoveContainer" containerID="9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67" Jan 29 13:37:45 crc kubenswrapper[4787]: E0129 13:37:45.480658 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67\": container with ID starting with 9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67 not found: ID does not exist" containerID="9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.480727 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67"} err="failed to get container status \"9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67\": rpc error: code = NotFound desc = could not find container \"9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67\": container with ID starting with 9b62fbad648587050c5a8f892c83ea6114c828b9de12e240706294542158aa67 not found: ID does not exist" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.489602 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: 
\"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.489714 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.489989 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlsvt\" (UniqueName: \"kubernetes.io/projected/70ed07cc-545f-4449-a9e6-90906fd77ec7-kube-api-access-nlsvt\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.490517 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/70ed07cc-545f-4449-a9e6-90906fd77ec7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.490582 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.490689 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-scripts\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.490797 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.557353 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-jgw6j"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.596685 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.596807 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlsvt\" (UniqueName: \"kubernetes.io/projected/70ed07cc-545f-4449-a9e6-90906fd77ec7-kube-api-access-nlsvt\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.596867 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/70ed07cc-545f-4449-a9e6-90906fd77ec7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.596886 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.596922 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-scripts\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.596968 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.597994 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/70ed07cc-545f-4449-a9e6-90906fd77ec7-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.603142 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-scripts\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.603210 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-6vrjj"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.603225 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.604234 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.605286 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.607977 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.626950 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlsvt\" (UniqueName: \"kubernetes.io/projected/70ed07cc-545f-4449-a9e6-90906fd77ec7-kube-api-access-nlsvt\") pod \"cinder-scheduler-0\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.629926 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-6vrjj"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.672202 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.673910 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.680175 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.685371 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.699408 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.699481 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.699498 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmj5x\" (UniqueName: \"kubernetes.io/projected/6c925c65-213d-4981-83f5-55a4946c69e0-kube-api-access-zmj5x\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.699544 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-config\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.699565 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: 
\"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.699605 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802424 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802498 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data-custom\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802522 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802540 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmj5x\" (UniqueName: \"kubernetes.io/projected/6c925c65-213d-4981-83f5-55a4946c69e0-kube-api-access-zmj5x\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802565 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802593 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n4pv\" (UniqueName: \"kubernetes.io/projected/8692e109-49d4-4983-a241-ad8180ea3610-kube-api-access-2n4pv\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802616 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8692e109-49d4-4983-a241-ad8180ea3610-logs\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802634 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-scripts\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc 
kubenswrapper[4787]: I0129 13:37:45.802656 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-config\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802680 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802701 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802728 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8692e109-49d4-4983-a241-ad8180ea3610-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.802755 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.803213 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.803649 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.803784 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.803992 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.807040 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-svc\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.809161 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-config\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.819022 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmj5x\" (UniqueName: \"kubernetes.io/projected/6c925c65-213d-4981-83f5-55a4946c69e0-kube-api-access-zmj5x\") pod \"dnsmasq-dns-6b4f5fc4f-6vrjj\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.904413 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.904835 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8692e109-49d4-4983-a241-ad8180ea3610-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.904932 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data-custom\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.904962 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.904990 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n4pv\" (UniqueName: \"kubernetes.io/projected/8692e109-49d4-4983-a241-ad8180ea3610-kube-api-access-2n4pv\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.905016 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8692e109-49d4-4983-a241-ad8180ea3610-logs\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.905031 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-scripts\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.906487 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8692e109-49d4-4983-a241-ad8180ea3610-logs\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.906546 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8692e109-49d4-4983-a241-ad8180ea3610-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.930683 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-scripts\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.931171 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.934575 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.943090 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data-custom\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.943524 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n4pv\" (UniqueName: \"kubernetes.io/projected/8692e109-49d4-4983-a241-ad8180ea3610-kube-api-access-2n4pv\") pod \"cinder-api-0\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " pod="openstack/cinder-api-0" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.954149 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:45 crc kubenswrapper[4787]: I0129 13:37:45.988201 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.001053 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0f253d3-1cba-4b6d-aedd-54d2a8a1821e" path="/var/lib/kubelet/pods/d0f253d3-1cba-4b6d-aedd-54d2a8a1821e/volumes" Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.348237 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d75dcc4b8-m2d8t" event={"ID":"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5","Type":"ContainerStarted","Data":"495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f"} Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.349744 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.349774 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.353406 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" event={"ID":"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f","Type":"ContainerStarted","Data":"2606d1d1fd7e3134e40393f8a812eda120def5e8c3125b8d18d7b50b8e1a5221"} Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.353578 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" podUID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" containerName="dnsmasq-dns" containerID="cri-o://2606d1d1fd7e3134e40393f8a812eda120def5e8c3125b8d18d7b50b8e1a5221" gracePeriod=10 Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.353746 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.365880 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85444c4b89-hx4zl" event={"ID":"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced","Type":"ContainerStarted","Data":"4505f4e6e319771bc3829367ee02ff72fbb90f8adde43914841d18e31337e0bb"} Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.394862 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-d75dcc4b8-m2d8t" podStartSLOduration=3.394839999 podStartE2EDuration="3.394839999s" podCreationTimestamp="2026-01-29 13:37:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:46.380657842 +0000 UTC m=+1305.141918128" watchObservedRunningTime="2026-01-29 13:37:46.394839999 +0000 UTC m=+1305.156100275" Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.405849 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.414302 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" podStartSLOduration=3.414285463 podStartE2EDuration="3.414285463s" podCreationTimestamp="2026-01-29 13:37:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:46.406086791 +0000 UTC m=+1305.167347067" watchObservedRunningTime="2026-01-29 13:37:46.414285463 +0000 UTC m=+1305.175545739" Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.471491 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-6b4f5fc4f-6vrjj"] Jan 29 13:37:46 crc kubenswrapper[4787]: W0129 13:37:46.523010 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c925c65_213d_4981_83f5_55a4946c69e0.slice/crio-510c37d761fb6c3798588be00e23478fb83901ad9013a0e8b2f9e553acb170d5 WatchSource:0}: Error finding container 510c37d761fb6c3798588be00e23478fb83901ad9013a0e8b2f9e553acb170d5: Status 404 returned error can't find the container with id 510c37d761fb6c3798588be00e23478fb83901ad9013a0e8b2f9e553acb170d5 Jan 29 13:37:46 crc kubenswrapper[4787]: I0129 13:37:46.562071 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.386248 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" event={"ID":"6c925c65-213d-4981-83f5-55a4946c69e0","Type":"ContainerStarted","Data":"510c37d761fb6c3798588be00e23478fb83901ad9013a0e8b2f9e553acb170d5"} Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.388108 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8692e109-49d4-4983-a241-ad8180ea3610","Type":"ContainerStarted","Data":"b9f2a05b52ca4affcc627a5e1fd4360177b9d05de2af583783b144dd5da42312"} Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.389869 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"70ed07cc-545f-4449-a9e6-90906fd77ec7","Type":"ContainerStarted","Data":"3f3bf84ba0d1f0659bcae6c1c1a203f8f8c43d7e99c94413e804b6210fd403db"} Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.392130 4787 generic.go:334] "Generic (PLEG): container finished" podID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" containerID="2606d1d1fd7e3134e40393f8a812eda120def5e8c3125b8d18d7b50b8e1a5221" exitCode=0 Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.392248 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" event={"ID":"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f","Type":"ContainerDied","Data":"2606d1d1fd7e3134e40393f8a812eda120def5e8c3125b8d18d7b50b8e1a5221"} Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.395581 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85444c4b89-hx4zl" event={"ID":"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced","Type":"ContainerStarted","Data":"e1e39ec84e4c856895c68934ffe37b579202f55f241d2985ef82c3428f5a54da"} Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.777612 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.851991 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4v66\" (UniqueName: \"kubernetes.io/projected/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-kube-api-access-m4v66\") pod \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.852041 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-swift-storage-0\") pod \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.852214 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-nb\") pod \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.852242 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-svc\") pod \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.852274 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-config\") pod \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.852309 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-sb\") pod \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\" (UID: \"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f\") " Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.858299 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-kube-api-access-m4v66" (OuterVolumeSpecName: "kube-api-access-m4v66") pod "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" (UID: "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f"). InnerVolumeSpecName "kube-api-access-m4v66". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.923242 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" (UID: "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.934736 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" (UID: "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.940053 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" (UID: "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.953800 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4v66\" (UniqueName: \"kubernetes.io/projected/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-kube-api-access-m4v66\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.953827 4787 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.953837 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.953847 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:47 crc kubenswrapper[4787]: I0129 13:37:47.968992 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" (UID: "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.031126 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-config" (OuterVolumeSpecName: "config") pod "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" (UID: "9db31e9c-3b01-4b05-99b0-06ab2bdbd69f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.056017 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.056053 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.405024 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8692e109-49d4-4983-a241-ad8180ea3610","Type":"ContainerStarted","Data":"68112c19e229f11da5b51e910d829b70241f87ef6d43476667a4d0fa295cb995"} Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.407050 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" event={"ID":"9db31e9c-3b01-4b05-99b0-06ab2bdbd69f","Type":"ContainerDied","Data":"2662d752c425d3ba9e86890f04b0885b96d02a8e09bb8d67d692384d3bee14cc"} Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.407107 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54c4dfcffc-jgw6j" Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.407112 4787 scope.go:117] "RemoveContainer" containerID="2606d1d1fd7e3134e40393f8a812eda120def5e8c3125b8d18d7b50b8e1a5221" Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.409569 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" event={"ID":"6c925c65-213d-4981-83f5-55a4946c69e0","Type":"ContainerStarted","Data":"8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718"} Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.409916 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.454831 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-jgw6j"] Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.463043 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54c4dfcffc-jgw6j"] Jan 29 13:37:48 crc kubenswrapper[4787]: I0129 13:37:48.464958 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-85444c4b89-hx4zl" podStartSLOduration=4.464939342 podStartE2EDuration="4.464939342s" podCreationTimestamp="2026-01-29 13:37:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:48.464700846 +0000 UTC m=+1307.225961122" watchObservedRunningTime="2026-01-29 13:37:48.464939342 +0000 UTC m=+1307.226199618" Jan 29 13:37:49 crc kubenswrapper[4787]: I0129 13:37:49.419487 4787 generic.go:334] "Generic (PLEG): container finished" podID="6c925c65-213d-4981-83f5-55a4946c69e0" containerID="8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718" exitCode=0 Jan 29 13:37:49 crc kubenswrapper[4787]: I0129 13:37:49.419589 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" event={"ID":"6c925c65-213d-4981-83f5-55a4946c69e0","Type":"ContainerDied","Data":"8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718"} Jan 29 13:37:49 crc kubenswrapper[4787]: I0129 13:37:49.994814 
4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" path="/var/lib/kubelet/pods/9db31e9c-3b01-4b05-99b0-06ab2bdbd69f/volumes" Jan 29 13:37:50 crc kubenswrapper[4787]: I0129 13:37:50.528627 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:51 crc kubenswrapper[4787]: I0129 13:37:51.009346 4787 scope.go:117] "RemoveContainer" containerID="11b44a359668a2d3b56bcaa9ef64d66e6f3aaa9fbfac978b14bdca0fd4d7f108" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.467997 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" event={"ID":"6c925c65-213d-4981-83f5-55a4946c69e0","Type":"ContainerStarted","Data":"326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60"} Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.468550 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.481644 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8692e109-49d4-4983-a241-ad8180ea3610","Type":"ContainerStarted","Data":"3a6f06c29c2b8ece3e099b4c0fe582fa992c2a7639e41470e3b5d3055e876d92"} Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.481804 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8692e109-49d4-4983-a241-ad8180ea3610" containerName="cinder-api-log" containerID="cri-o://68112c19e229f11da5b51e910d829b70241f87ef6d43476667a4d0fa295cb995" gracePeriod=30 Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.482040 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.482075 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8692e109-49d4-4983-a241-ad8180ea3610" containerName="cinder-api" containerID="cri-o://3a6f06c29c2b8ece3e099b4c0fe582fa992c2a7639e41470e3b5d3055e876d92" gracePeriod=30 Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.502816 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" podStartSLOduration=7.502794859 podStartE2EDuration="7.502794859s" podCreationTimestamp="2026-01-29 13:37:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:52.500859189 +0000 UTC m=+1311.262119455" watchObservedRunningTime="2026-01-29 13:37:52.502794859 +0000 UTC m=+1311.264055135" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.512685 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" event={"ID":"00f5493b-e570-4684-b7ae-9af7154b3e51","Type":"ContainerStarted","Data":"03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1"} Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.512737 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" event={"ID":"00f5493b-e570-4684-b7ae-9af7154b3e51","Type":"ContainerStarted","Data":"b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c"} Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.559617 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-64fc7f548f-h8fjw" 
event={"ID":"0e48c5bf-c285-446e-a91e-fe216f819f05","Type":"ContainerStarted","Data":"01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3"} Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.559665 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-64fc7f548f-h8fjw" event={"ID":"0e48c5bf-c285-446e-a91e-fe216f819f05","Type":"ContainerStarted","Data":"0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f"} Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.576758 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.576742033 podStartE2EDuration="7.576742033s" podCreationTimestamp="2026-01-29 13:37:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:52.546906871 +0000 UTC m=+1311.308167147" watchObservedRunningTime="2026-01-29 13:37:52.576742033 +0000 UTC m=+1311.338002299" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.611889 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" podStartSLOduration=2.581865983 podStartE2EDuration="9.611873693s" podCreationTimestamp="2026-01-29 13:37:43 +0000 UTC" firstStartedPulling="2026-01-29 13:37:44.395652472 +0000 UTC m=+1303.156912738" lastFinishedPulling="2026-01-29 13:37:51.425660172 +0000 UTC m=+1310.186920448" observedRunningTime="2026-01-29 13:37:52.580515231 +0000 UTC m=+1311.341775507" watchObservedRunningTime="2026-01-29 13:37:52.611873693 +0000 UTC m=+1311.373133969" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.623412 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5687c787c6-cdl5t"] Jan 29 13:37:52 crc kubenswrapper[4787]: E0129 13:37:52.623877 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" containerName="dnsmasq-dns" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.623895 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" containerName="dnsmasq-dns" Jan 29 13:37:52 crc kubenswrapper[4787]: E0129 13:37:52.623915 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" containerName="init" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.623922 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" containerName="init" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.624097 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9db31e9c-3b01-4b05-99b0-06ab2bdbd69f" containerName="dnsmasq-dns" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.628528 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.633861 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.634018 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.639059 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-64fc7f548f-h8fjw" podStartSLOduration=2.559915454 podStartE2EDuration="9.639035996s" podCreationTimestamp="2026-01-29 13:37:43 +0000 UTC" firstStartedPulling="2026-01-29 13:37:44.342853825 +0000 UTC m=+1303.104114101" lastFinishedPulling="2026-01-29 13:37:51.421974367 +0000 UTC m=+1310.183234643" observedRunningTime="2026-01-29 13:37:52.616994435 +0000 UTC m=+1311.378254721" watchObservedRunningTime="2026-01-29 13:37:52.639035996 +0000 UTC m=+1311.400296272" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.662004 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5687c787c6-cdl5t"] Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.770857 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-internal-tls-certs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.770939 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data-custom\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.770960 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-public-tls-certs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.770992 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-combined-ca-bundle\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.771068 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkz9l\" (UniqueName: \"kubernetes.io/projected/3f204ba0-4972-4e50-9c21-e9639ef73ff3-kube-api-access-vkz9l\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.771084 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data\") 
pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.771159 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f204ba0-4972-4e50-9c21-e9639ef73ff3-logs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.873107 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-internal-tls-certs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.873225 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data-custom\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.873248 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-public-tls-certs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.873302 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-combined-ca-bundle\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.873326 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkz9l\" (UniqueName: \"kubernetes.io/projected/3f204ba0-4972-4e50-9c21-e9639ef73ff3-kube-api-access-vkz9l\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.873345 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.873472 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f204ba0-4972-4e50-9c21-e9639ef73ff3-logs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.919583 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f204ba0-4972-4e50-9c21-e9639ef73ff3-logs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: 
\"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.919892 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-internal-tls-certs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.919948 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-public-tls-certs\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.920032 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-combined-ca-bundle\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.921359 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data-custom\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.921936 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkz9l\" (UniqueName: \"kubernetes.io/projected/3f204ba0-4972-4e50-9c21-e9639ef73ff3-kube-api-access-vkz9l\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.922252 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data\") pod \"barbican-api-5687c787c6-cdl5t\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") " pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:52 crc kubenswrapper[4787]: I0129 13:37:52.995916 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:53 crc kubenswrapper[4787]: I0129 13:37:53.574925 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"70ed07cc-545f-4449-a9e6-90906fd77ec7","Type":"ContainerStarted","Data":"8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6"} Jan 29 13:37:53 crc kubenswrapper[4787]: I0129 13:37:53.578683 4787 generic.go:334] "Generic (PLEG): container finished" podID="8692e109-49d4-4983-a241-ad8180ea3610" containerID="3a6f06c29c2b8ece3e099b4c0fe582fa992c2a7639e41470e3b5d3055e876d92" exitCode=0 Jan 29 13:37:53 crc kubenswrapper[4787]: I0129 13:37:53.578712 4787 generic.go:334] "Generic (PLEG): container finished" podID="8692e109-49d4-4983-a241-ad8180ea3610" containerID="68112c19e229f11da5b51e910d829b70241f87ef6d43476667a4d0fa295cb995" exitCode=143 Jan 29 13:37:53 crc kubenswrapper[4787]: I0129 13:37:53.579724 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8692e109-49d4-4983-a241-ad8180ea3610","Type":"ContainerDied","Data":"3a6f06c29c2b8ece3e099b4c0fe582fa992c2a7639e41470e3b5d3055e876d92"} Jan 29 13:37:53 crc kubenswrapper[4787]: I0129 13:37:53.579764 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8692e109-49d4-4983-a241-ad8180ea3610","Type":"ContainerDied","Data":"68112c19e229f11da5b51e910d829b70241f87ef6d43476667a4d0fa295cb995"} Jan 29 13:37:53 crc kubenswrapper[4787]: I0129 13:37:53.591782 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5687c787c6-cdl5t"] Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.156431 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.308667 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-combined-ca-bundle\") pod \"8692e109-49d4-4983-a241-ad8180ea3610\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.308724 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data-custom\") pod \"8692e109-49d4-4983-a241-ad8180ea3610\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.308785 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8692e109-49d4-4983-a241-ad8180ea3610-logs\") pod \"8692e109-49d4-4983-a241-ad8180ea3610\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.308871 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data\") pod \"8692e109-49d4-4983-a241-ad8180ea3610\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.308944 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8692e109-49d4-4983-a241-ad8180ea3610-etc-machine-id\") pod \"8692e109-49d4-4983-a241-ad8180ea3610\" (UID: 
\"8692e109-49d4-4983-a241-ad8180ea3610\") " Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.309102 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-scripts\") pod \"8692e109-49d4-4983-a241-ad8180ea3610\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.309180 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2n4pv\" (UniqueName: \"kubernetes.io/projected/8692e109-49d4-4983-a241-ad8180ea3610-kube-api-access-2n4pv\") pod \"8692e109-49d4-4983-a241-ad8180ea3610\" (UID: \"8692e109-49d4-4983-a241-ad8180ea3610\") " Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.309417 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8692e109-49d4-4983-a241-ad8180ea3610-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8692e109-49d4-4983-a241-ad8180ea3610" (UID: "8692e109-49d4-4983-a241-ad8180ea3610"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.309751 4787 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8692e109-49d4-4983-a241-ad8180ea3610-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.310480 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8692e109-49d4-4983-a241-ad8180ea3610-logs" (OuterVolumeSpecName: "logs") pod "8692e109-49d4-4983-a241-ad8180ea3610" (UID: "8692e109-49d4-4983-a241-ad8180ea3610"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.317552 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8692e109-49d4-4983-a241-ad8180ea3610" (UID: "8692e109-49d4-4983-a241-ad8180ea3610"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.317685 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8692e109-49d4-4983-a241-ad8180ea3610-kube-api-access-2n4pv" (OuterVolumeSpecName: "kube-api-access-2n4pv") pod "8692e109-49d4-4983-a241-ad8180ea3610" (UID: "8692e109-49d4-4983-a241-ad8180ea3610"). InnerVolumeSpecName "kube-api-access-2n4pv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.319584 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-scripts" (OuterVolumeSpecName: "scripts") pod "8692e109-49d4-4983-a241-ad8180ea3610" (UID: "8692e109-49d4-4983-a241-ad8180ea3610"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.344309 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8692e109-49d4-4983-a241-ad8180ea3610" (UID: "8692e109-49d4-4983-a241-ad8180ea3610"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.386614 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data" (OuterVolumeSpecName: "config-data") pod "8692e109-49d4-4983-a241-ad8180ea3610" (UID: "8692e109-49d4-4983-a241-ad8180ea3610"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.411532 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.411568 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2n4pv\" (UniqueName: \"kubernetes.io/projected/8692e109-49d4-4983-a241-ad8180ea3610-kube-api-access-2n4pv\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.411582 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.411590 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.411599 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8692e109-49d4-4983-a241-ad8180ea3610-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.411611 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8692e109-49d4-4983-a241-ad8180ea3610-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.589975 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5687c787c6-cdl5t" event={"ID":"3f204ba0-4972-4e50-9c21-e9639ef73ff3","Type":"ContainerStarted","Data":"483fbd8d88259f502b0982cdbf412937c30e55091fb48778417b91b9a155bfca"} Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.590034 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.590044 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5687c787c6-cdl5t" event={"ID":"3f204ba0-4972-4e50-9c21-e9639ef73ff3","Type":"ContainerStarted","Data":"1409e2db8a0ffd4d94c91a11022596ab339a612fb26e6871a45ae7940cf15a10"} Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.590055 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5687c787c6-cdl5t" event={"ID":"3f204ba0-4972-4e50-9c21-e9639ef73ff3","Type":"ContainerStarted","Data":"4ff24cdac41bb221041b39bdc8cfeb3cb5a9238340b306c6866ecc06e7a9372d"} Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.590067 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.591935 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-scheduler-0" event={"ID":"70ed07cc-545f-4449-a9e6-90906fd77ec7","Type":"ContainerStarted","Data":"ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f"} Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.593759 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.593740 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8692e109-49d4-4983-a241-ad8180ea3610","Type":"ContainerDied","Data":"b9f2a05b52ca4affcc627a5e1fd4360177b9d05de2af583783b144dd5da42312"} Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.593913 4787 scope.go:117] "RemoveContainer" containerID="3a6f06c29c2b8ece3e099b4c0fe582fa992c2a7639e41470e3b5d3055e876d92" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.613894 4787 scope.go:117] "RemoveContainer" containerID="68112c19e229f11da5b51e910d829b70241f87ef6d43476667a4d0fa295cb995" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.625688 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5687c787c6-cdl5t" podStartSLOduration=2.625673118 podStartE2EDuration="2.625673118s" podCreationTimestamp="2026-01-29 13:37:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:54.622445094 +0000 UTC m=+1313.383705370" watchObservedRunningTime="2026-01-29 13:37:54.625673118 +0000 UTC m=+1313.386933384" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.653074 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.666740 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.684772 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:54 crc kubenswrapper[4787]: E0129 13:37:54.685159 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8692e109-49d4-4983-a241-ad8180ea3610" containerName="cinder-api" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.685177 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8692e109-49d4-4983-a241-ad8180ea3610" containerName="cinder-api" Jan 29 13:37:54 crc kubenswrapper[4787]: E0129 13:37:54.685190 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8692e109-49d4-4983-a241-ad8180ea3610" containerName="cinder-api-log" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.685195 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8692e109-49d4-4983-a241-ad8180ea3610" containerName="cinder-api-log" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.685394 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8692e109-49d4-4983-a241-ad8180ea3610" containerName="cinder-api" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.685412 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8692e109-49d4-4983-a241-ad8180ea3610" containerName="cinder-api-log" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.686299 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.688225 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.688416 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.688747 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.697160 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.164029981 podStartE2EDuration="9.697141728s" podCreationTimestamp="2026-01-29 13:37:45 +0000 UTC" firstStartedPulling="2026-01-29 13:37:46.419816236 +0000 UTC m=+1305.181076512" lastFinishedPulling="2026-01-29 13:37:51.952927973 +0000 UTC m=+1310.714188259" observedRunningTime="2026-01-29 13:37:54.683334741 +0000 UTC m=+1313.444595017" watchObservedRunningTime="2026-01-29 13:37:54.697141728 +0000 UTC m=+1313.458402004" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.728337 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.818833 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data-custom\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.818896 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1287d5ec-d072-43ba-b553-6d2d229b7c6c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.818947 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.818984 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-scripts\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.819018 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7chxw\" (UniqueName: \"kubernetes.io/projected/1287d5ec-d072-43ba-b553-6d2d229b7c6c-kube-api-access-7chxw\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.819117 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " 
pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.819152 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.819176 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1287d5ec-d072-43ba-b553-6d2d229b7c6c-logs\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.819300 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920594 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1287d5ec-d072-43ba-b553-6d2d229b7c6c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920653 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920688 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-scripts\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920713 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7chxw\" (UniqueName: \"kubernetes.io/projected/1287d5ec-d072-43ba-b553-6d2d229b7c6c-kube-api-access-7chxw\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920758 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920786 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920805 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1287d5ec-d072-43ba-b553-6d2d229b7c6c-logs\") pod \"cinder-api-0\" (UID: 
\"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920860 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920904 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data-custom\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.920719 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1287d5ec-d072-43ba-b553-6d2d229b7c6c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.921445 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1287d5ec-d072-43ba-b553-6d2d229b7c6c-logs\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.937123 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data-custom\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.940923 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-scripts\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.941608 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.945010 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.945231 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7chxw\" (UniqueName: \"kubernetes.io/projected/1287d5ec-d072-43ba-b553-6d2d229b7c6c-kube-api-access-7chxw\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.945983 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " 
pod="openstack/cinder-api-0" Jan 29 13:37:54 crc kubenswrapper[4787]: I0129 13:37:54.946267 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data\") pod \"cinder-api-0\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " pod="openstack/cinder-api-0" Jan 29 13:37:55 crc kubenswrapper[4787]: I0129 13:37:55.001725 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 13:37:55 crc kubenswrapper[4787]: I0129 13:37:55.522621 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 29 13:37:55 crc kubenswrapper[4787]: W0129 13:37:55.547175 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1287d5ec_d072_43ba_b553_6d2d229b7c6c.slice/crio-258cf7aae6f3788f6fbd3eb95fd920528e7b1817b0a99e287db2d7d91d98785c WatchSource:0}: Error finding container 258cf7aae6f3788f6fbd3eb95fd920528e7b1817b0a99e287db2d7d91d98785c: Status 404 returned error can't find the container with id 258cf7aae6f3788f6fbd3eb95fd920528e7b1817b0a99e287db2d7d91d98785c Jan 29 13:37:55 crc kubenswrapper[4787]: I0129 13:37:55.668310 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1287d5ec-d072-43ba-b553-6d2d229b7c6c","Type":"ContainerStarted","Data":"258cf7aae6f3788f6fbd3eb95fd920528e7b1817b0a99e287db2d7d91d98785c"} Jan 29 13:37:55 crc kubenswrapper[4787]: I0129 13:37:55.804592 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 13:37:55 crc kubenswrapper[4787]: I0129 13:37:55.999321 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8692e109-49d4-4983-a241-ad8180ea3610" path="/var/lib/kubelet/pods/8692e109-49d4-4983-a241-ad8180ea3610/volumes" Jan 29 13:37:56 crc kubenswrapper[4787]: I0129 13:37:56.457478 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:56 crc kubenswrapper[4787]: I0129 13:37:56.683002 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1287d5ec-d072-43ba-b553-6d2d229b7c6c","Type":"ContainerStarted","Data":"97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a"} Jan 29 13:37:56 crc kubenswrapper[4787]: I0129 13:37:56.734212 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:37:57 crc kubenswrapper[4787]: I0129 13:37:57.353597 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 13:37:57 crc kubenswrapper[4787]: I0129 13:37:57.355611 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 29 13:37:57 crc kubenswrapper[4787]: I0129 13:37:57.368075 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 13:37:57 crc kubenswrapper[4787]: I0129 13:37:57.379998 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 29 13:37:57 crc kubenswrapper[4787]: I0129 13:37:57.695663 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"1287d5ec-d072-43ba-b553-6d2d229b7c6c","Type":"ContainerStarted","Data":"ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f"} Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.393949 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.394021 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.394070 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.394924 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"845d9853bc8431c6707c4ecc6659b35b630563236beea9cc4f52a93c1c065e94"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.395007 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://845d9853bc8431c6707c4ecc6659b35b630563236beea9cc4f52a93c1c065e94" gracePeriod=600 Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.715503 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="845d9853bc8431c6707c4ecc6659b35b630563236beea9cc4f52a93c1c065e94" exitCode=0 Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.715561 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"845d9853bc8431c6707c4ecc6659b35b630563236beea9cc4f52a93c1c065e94"} Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.716008 4787 scope.go:117] "RemoveContainer" containerID="df9e8740bf151a75c689d168b226a7f5a7159a6e915923c5f7df0f22fffdf98a" Jan 29 13:37:58 crc kubenswrapper[4787]: I0129 13:37:58.716243 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 29 13:37:59 crc kubenswrapper[4787]: I0129 13:37:59.731273 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"} Jan 29 13:37:59 crc kubenswrapper[4787]: I0129 13:37:59.753584 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.753562075 podStartE2EDuration="5.753562075s" podCreationTimestamp="2026-01-29 13:37:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:37:57.719058224 +0000 UTC m=+1316.480318500" watchObservedRunningTime="2026-01-29 13:37:59.753562075 +0000 UTC m=+1318.514822351" Jan 29 13:38:00 crc kubenswrapper[4787]: I0129 13:38:00.104309 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:38:00 crc kubenswrapper[4787]: I0129 13:38:00.956787 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.021229 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-vtkd9"] Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.021444 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" podUID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerName="dnsmasq-dns" containerID="cri-o://257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50" gracePeriod=10 Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.274687 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.328340 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.559875 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.656859 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pftq\" (UniqueName: \"kubernetes.io/projected/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-kube-api-access-8pftq\") pod \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.656948 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-swift-storage-0\") pod \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.657026 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-svc\") pod \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.657062 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-sb\") pod \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.657089 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-config\") pod \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.657160 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-nb\") pod \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\" (UID: \"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b\") " Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.688754 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-kube-api-access-8pftq" (OuterVolumeSpecName: "kube-api-access-8pftq") pod "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" (UID: "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b"). InnerVolumeSpecName "kube-api-access-8pftq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.718581 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" (UID: "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.720433 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" (UID: "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.728553 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" (UID: "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.738138 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" (UID: "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.742936 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-config" (OuterVolumeSpecName: "config") pod "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" (UID: "b9a222e3-4b19-4932-b08b-b7bb8d2edb0b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.760611 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pftq\" (UniqueName: \"kubernetes.io/projected/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-kube-api-access-8pftq\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.760924 4787 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.761015 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.761095 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.761186 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.761261 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.763144 4787 generic.go:334] "Generic (PLEG): container finished" podID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerID="257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50" exitCode=0 Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.763294 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" event={"ID":"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b","Type":"ContainerDied","Data":"257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50"} Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.763359 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" event={"ID":"b9a222e3-4b19-4932-b08b-b7bb8d2edb0b","Type":"ContainerDied","Data":"379f2b74dcb65a95088ed633348c8d73827293656c2488e1e97b7647d02fb2d7"} Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.763364 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerName="cinder-scheduler" containerID="cri-o://8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6" gracePeriod=30 Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.763387 4787 scope.go:117] "RemoveContainer" containerID="257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.763615 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.763842 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerName="probe" containerID="cri-o://ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f" gracePeriod=30 Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.809003 4787 scope.go:117] "RemoveContainer" containerID="52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.828801 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-vtkd9"] Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.837863 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66567888d7-vtkd9"] Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.841556 4787 scope.go:117] "RemoveContainer" containerID="257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50" Jan 29 13:38:01 crc kubenswrapper[4787]: E0129 13:38:01.842055 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50\": container with ID starting with 257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50 not found: ID does not exist" containerID="257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.842091 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50"} err="failed to get container status \"257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50\": rpc error: code = NotFound desc = could not find container \"257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50\": container with ID starting with 257aec4040241f72bb904d7032f497ffcf3e3fa88ee5517e00cf3cf3bf32db50 not found: ID does not exist" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.842113 4787 scope.go:117] "RemoveContainer" containerID="52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332" Jan 29 13:38:01 crc kubenswrapper[4787]: E0129 13:38:01.842365 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332\": container with ID starting with 52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332 not found: ID does not exist" containerID="52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332" Jan 29 13:38:01 crc kubenswrapper[4787]: I0129 13:38:01.842395 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332"} err="failed to get container status \"52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332\": rpc error: code = NotFound desc = could not find container \"52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332\": container with ID starting with 52e3a56a949b658abb4b23e77db684fdc73ee5842b8bf4f2790a0c8a40a6e332 not found: ID does not exist" Jan 29 13:38:02 crc kubenswrapper[4787]: I0129 13:38:02.003931 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" path="/var/lib/kubelet/pods/b9a222e3-4b19-4932-b08b-b7bb8d2edb0b/volumes" Jan 29 13:38:02 crc kubenswrapper[4787]: I0129 13:38:02.230422 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:38:02 crc kubenswrapper[4787]: I0129 13:38:02.233442 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:38:02 crc kubenswrapper[4787]: I0129 13:38:02.783717 4787 generic.go:334] "Generic (PLEG): container finished" podID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerID="ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f" exitCode=0 Jan 29 13:38:02 crc kubenswrapper[4787]: I0129 13:38:02.784167 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"70ed07cc-545f-4449-a9e6-90906fd77ec7","Type":"ContainerDied","Data":"ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f"} Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.222705 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 29 13:38:04 crc kubenswrapper[4787]: E0129 13:38:04.223781 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerName="dnsmasq-dns" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.223798 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerName="dnsmasq-dns" Jan 29 13:38:04 crc kubenswrapper[4787]: E0129 13:38:04.223842 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerName="init" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.223850 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerName="init" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.224068 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerName="dnsmasq-dns" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.224805 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.226793 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.226970 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-h4c7r" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.227245 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.233172 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.308162 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.308246 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.308329 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config-secret\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.308390 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dbjf\" (UniqueName: \"kubernetes.io/projected/7f67df75-67c0-4609-9afe-caa099a5ad1e-kube-api-access-2dbjf\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.410020 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.410068 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.410132 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config-secret\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.410192 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-2dbjf\" (UniqueName: \"kubernetes.io/projected/7f67df75-67c0-4609-9afe-caa099a5ad1e-kube-api-access-2dbjf\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.410964 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.416761 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config-secret\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.418041 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.432204 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dbjf\" (UniqueName: \"kubernetes.io/projected/7f67df75-67c0-4609-9afe-caa099a5ad1e-kube-api-access-2dbjf\") pod \"openstackclient\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.434693 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.544660 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.628321 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5687c787c6-cdl5t" Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.698984 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d75dcc4b8-m2d8t"] Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.699281 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-d75dcc4b8-m2d8t" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerName="barbican-api" containerID="cri-o://495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f" gracePeriod=30 Jan 29 13:38:04 crc kubenswrapper[4787]: I0129 13:38:04.707995 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-d75dcc4b8-m2d8t" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerName="barbican-api-log" containerID="cri-o://17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8" gracePeriod=30 Jan 29 13:38:05 crc kubenswrapper[4787]: I0129 13:38:05.138696 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 29 13:38:05 crc kubenswrapper[4787]: I0129 13:38:05.816085 4787 generic.go:334] "Generic (PLEG): container finished" podID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerID="17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8" exitCode=143 Jan 29 13:38:05 crc kubenswrapper[4787]: I0129 13:38:05.816267 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d75dcc4b8-m2d8t" event={"ID":"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5","Type":"ContainerDied","Data":"17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8"} Jan 29 13:38:05 crc kubenswrapper[4787]: I0129 13:38:05.818223 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7f67df75-67c0-4609-9afe-caa099a5ad1e","Type":"ContainerStarted","Data":"0fbd433f5cb04eeac7cc4d4ab32228ba4785a9f64993b65614c442f45dab5bb7"} Jan 29 13:38:05 crc kubenswrapper[4787]: W0129 13:38:05.874808 4787 container.go:586] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70ed07cc_545f_4449_a9e6_90906fd77ec7.slice/crio-3f3bf84ba0d1f0659bcae6c1c1a203f8f8c43d7e99c94413e804b6210fd403db": error while statting cgroup v2: [unable to parse /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70ed07cc_545f_4449_a9e6_90906fd77ec7.slice/crio-3f3bf84ba0d1f0659bcae6c1c1a203f8f8c43d7e99c94413e804b6210fd403db/memory.stat: read /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70ed07cc_545f_4449_a9e6_90906fd77ec7.slice/crio-3f3bf84ba0d1f0659bcae6c1c1a203f8f8c43d7e99c94413e804b6210fd403db/memory.stat: no such device], continuing to push stats Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.292789 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.353087 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlsvt\" (UniqueName: \"kubernetes.io/projected/70ed07cc-545f-4449-a9e6-90906fd77ec7-kube-api-access-nlsvt\") pod \"70ed07cc-545f-4449-a9e6-90906fd77ec7\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.359729 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data\") pod \"70ed07cc-545f-4449-a9e6-90906fd77ec7\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.359798 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-scripts\") pod \"70ed07cc-545f-4449-a9e6-90906fd77ec7\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.359862 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-combined-ca-bundle\") pod \"70ed07cc-545f-4449-a9e6-90906fd77ec7\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.359920 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data-custom\") pod \"70ed07cc-545f-4449-a9e6-90906fd77ec7\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.360003 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/70ed07cc-545f-4449-a9e6-90906fd77ec7-etc-machine-id\") pod \"70ed07cc-545f-4449-a9e6-90906fd77ec7\" (UID: \"70ed07cc-545f-4449-a9e6-90906fd77ec7\") " Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.361070 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/70ed07cc-545f-4449-a9e6-90906fd77ec7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "70ed07cc-545f-4449-a9e6-90906fd77ec7" (UID: "70ed07cc-545f-4449-a9e6-90906fd77ec7"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.368110 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-66567888d7-vtkd9" podUID="b9a222e3-4b19-4932-b08b-b7bb8d2edb0b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.148:5353: i/o timeout" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.368400 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70ed07cc-545f-4449-a9e6-90906fd77ec7-kube-api-access-nlsvt" (OuterVolumeSpecName: "kube-api-access-nlsvt") pod "70ed07cc-545f-4449-a9e6-90906fd77ec7" (UID: "70ed07cc-545f-4449-a9e6-90906fd77ec7"). InnerVolumeSpecName "kube-api-access-nlsvt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.371051 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-scripts" (OuterVolumeSpecName: "scripts") pod "70ed07cc-545f-4449-a9e6-90906fd77ec7" (UID: "70ed07cc-545f-4449-a9e6-90906fd77ec7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.381235 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "70ed07cc-545f-4449-a9e6-90906fd77ec7" (UID: "70ed07cc-545f-4449-a9e6-90906fd77ec7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.458182 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "70ed07cc-545f-4449-a9e6-90906fd77ec7" (UID: "70ed07cc-545f-4449-a9e6-90906fd77ec7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.464408 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.464433 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.464442 4787 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/70ed07cc-545f-4449-a9e6-90906fd77ec7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.464871 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlsvt\" (UniqueName: \"kubernetes.io/projected/70ed07cc-545f-4449-a9e6-90906fd77ec7-kube-api-access-nlsvt\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.464897 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.497883 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data" (OuterVolumeSpecName: "config-data") pod "70ed07cc-545f-4449-a9e6-90906fd77ec7" (UID: "70ed07cc-545f-4449-a9e6-90906fd77ec7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.566717 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/70ed07cc-545f-4449-a9e6-90906fd77ec7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.837130 4787 generic.go:334] "Generic (PLEG): container finished" podID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerID="8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6" exitCode=0 Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.837180 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"70ed07cc-545f-4449-a9e6-90906fd77ec7","Type":"ContainerDied","Data":"8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6"} Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.837185 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.837211 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"70ed07cc-545f-4449-a9e6-90906fd77ec7","Type":"ContainerDied","Data":"3f3bf84ba0d1f0659bcae6c1c1a203f8f8c43d7e99c94413e804b6210fd403db"} Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.837234 4787 scope.go:117] "RemoveContainer" containerID="ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.899192 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.903700 4787 scope.go:117] "RemoveContainer" containerID="8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.908018 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.926081 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:38:06 crc kubenswrapper[4787]: E0129 13:38:06.926552 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerName="probe" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.926575 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerName="probe" Jan 29 13:38:06 crc kubenswrapper[4787]: E0129 13:38:06.926590 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerName="cinder-scheduler" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.926598 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerName="cinder-scheduler" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.926816 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerName="cinder-scheduler" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.926835 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" containerName="probe" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.928011 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.930850 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 29 13:38:06 crc kubenswrapper[4787]: I0129 13:38:06.938598 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.015396 4787 scope.go:117] "RemoveContainer" containerID="ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f" Jan 29 13:38:07 crc kubenswrapper[4787]: E0129 13:38:07.016160 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f\": container with ID starting with ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f not found: ID does not exist" containerID="ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.016237 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f"} err="failed to get container status \"ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f\": rpc error: code = NotFound desc = could not find container \"ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f\": container with ID starting with ab380ba4dc57fb0fc395bac7b61a7ec84be664ea12e8291a15144c9e4186389f not found: ID does not exist" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.016272 4787 scope.go:117] "RemoveContainer" containerID="8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6" Jan 29 13:38:07 crc kubenswrapper[4787]: E0129 13:38:07.018614 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6\": container with ID starting with 8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6 not found: ID does not exist" containerID="8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.018756 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6"} err="failed to get container status \"8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6\": rpc error: code = NotFound desc = could not find container \"8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6\": container with ID starting with 8d9d533520314a1a68a9343e8c9cc4053551b4f19d0e47fcfe7b90137e7546c6 not found: ID does not exist" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.075039 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.075100 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-combined-ca-bundle\") pod 
\"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.075206 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9df3779-71e8-4441-a410-d4fe2fb2267e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.075238 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.075257 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.075299 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snp6f\" (UniqueName: \"kubernetes.io/projected/d9df3779-71e8-4441-a410-d4fe2fb2267e-kube-api-access-snp6f\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.121382 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.181281 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.181371 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.181520 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snp6f\" (UniqueName: \"kubernetes.io/projected/d9df3779-71e8-4441-a410-d4fe2fb2267e-kube-api-access-snp6f\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.181613 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.181716 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-combined-ca-bundle\") pod 
\"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.182030 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9df3779-71e8-4441-a410-d4fe2fb2267e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.182521 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9df3779-71e8-4441-a410-d4fe2fb2267e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.187211 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-scripts\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.187516 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.187556 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.187846 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.203613 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snp6f\" (UniqueName: \"kubernetes.io/projected/d9df3779-71e8-4441-a410-d4fe2fb2267e-kube-api-access-snp6f\") pod \"cinder-scheduler-0\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.274259 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.819442 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:38:07 crc kubenswrapper[4787]: W0129 13:38:07.835622 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9df3779_71e8_4441_a410_d4fe2fb2267e.slice/crio-6f3bf64a03ab83f7f7c99e9c24292e51e46c181bb084e6c70f19e1f08178c01e WatchSource:0}: Error finding container 6f3bf64a03ab83f7f7c99e9c24292e51e46c181bb084e6c70f19e1f08178c01e: Status 404 returned error can't find the container with id 6f3bf64a03ab83f7f7c99e9c24292e51e46c181bb084e6c70f19e1f08178c01e Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.847864 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9df3779-71e8-4441-a410-d4fe2fb2267e","Type":"ContainerStarted","Data":"6f3bf64a03ab83f7f7c99e9c24292e51e46c181bb084e6c70f19e1f08178c01e"} Jan 29 13:38:07 crc kubenswrapper[4787]: I0129 13:38:07.997959 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70ed07cc-545f-4449-a9e6-90906fd77ec7" path="/var/lib/kubelet/pods/70ed07cc-545f-4449-a9e6-90906fd77ec7/volumes" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.270419 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.307982 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-combined-ca-bundle\") pod \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.308050 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-logs\") pod \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.308100 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data\") pod \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.308821 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-logs" (OuterVolumeSpecName: "logs") pod "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" (UID: "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.309412 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9lnt\" (UniqueName: \"kubernetes.io/projected/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-kube-api-access-t9lnt\") pod \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.309474 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data-custom\") pod \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\" (UID: \"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.310861 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.317594 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" (UID: "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.322591 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-kube-api-access-t9lnt" (OuterVolumeSpecName: "kube-api-access-t9lnt") pod "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" (UID: "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5"). InnerVolumeSpecName "kube-api-access-t9lnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.340827 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" (UID: "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.368817 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data" (OuterVolumeSpecName: "config-data") pod "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" (UID: "4fd02e5a-4fc3-46f4-b5a5-42304f4601d5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.421808 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.421837 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.421846 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9lnt\" (UniqueName: \"kubernetes.io/projected/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-kube-api-access-t9lnt\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.421855 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.445756 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-77bffb9b6f-5z6t5"] Jan 29 13:38:08 crc kubenswrapper[4787]: E0129 13:38:08.446299 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerName="barbican-api" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.446322 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerName="barbican-api" Jan 29 13:38:08 crc kubenswrapper[4787]: E0129 13:38:08.446349 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerName="barbican-api-log" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.446358 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerName="barbican-api-log" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.446550 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerName="barbican-api-log" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.446577 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerName="barbican-api" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.447760 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.477102 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.479265 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.479967 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.492144 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-77bffb9b6f-5z6t5"] Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.554166 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-combined-ca-bundle\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.554570 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-etc-swift\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.554697 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-config-data\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.554864 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-public-tls-certs\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.554930 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-run-httpd\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.554966 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph7vp\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-kube-api-access-ph7vp\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.555043 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-log-httpd\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " 
pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.555080 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-internal-tls-certs\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.657954 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-config-data\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.658033 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-public-tls-certs\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.658069 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-run-httpd\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.658095 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph7vp\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-kube-api-access-ph7vp\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.658144 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-log-httpd\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.658170 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-internal-tls-certs\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.658227 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-combined-ca-bundle\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.658304 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-etc-swift\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " 
pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.659402 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-log-httpd\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.664490 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-run-httpd\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.671427 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.676169 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-etc-swift\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.677607 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-internal-tls-certs\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.678757 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-combined-ca-bundle\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.682192 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-public-tls-certs\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.682996 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-config-data\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.722320 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph7vp\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-kube-api-access-ph7vp\") pod \"swift-proxy-77bffb9b6f-5z6t5\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.759123 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt7tf\" (UniqueName: \"kubernetes.io/projected/e13248de-b9b0-4027-bc49-e5a6ea72cf71-kube-api-access-nt7tf\") pod 
\"e13248de-b9b0-4027-bc49-e5a6ea72cf71\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.759530 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-combined-ca-bundle\") pod \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.759601 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-run-httpd\") pod \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.759694 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-log-httpd\") pod \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.759725 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-config-data\") pod \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.759853 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-sg-core-conf-yaml\") pod \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.759886 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-scripts\") pod \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\" (UID: \"e13248de-b9b0-4027-bc49-e5a6ea72cf71\") " Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.761431 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e13248de-b9b0-4027-bc49-e5a6ea72cf71" (UID: "e13248de-b9b0-4027-bc49-e5a6ea72cf71"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.761787 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e13248de-b9b0-4027-bc49-e5a6ea72cf71" (UID: "e13248de-b9b0-4027-bc49-e5a6ea72cf71"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.764470 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-scripts" (OuterVolumeSpecName: "scripts") pod "e13248de-b9b0-4027-bc49-e5a6ea72cf71" (UID: "e13248de-b9b0-4027-bc49-e5a6ea72cf71"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.765644 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e13248de-b9b0-4027-bc49-e5a6ea72cf71-kube-api-access-nt7tf" (OuterVolumeSpecName: "kube-api-access-nt7tf") pod "e13248de-b9b0-4027-bc49-e5a6ea72cf71" (UID: "e13248de-b9b0-4027-bc49-e5a6ea72cf71"). InnerVolumeSpecName "kube-api-access-nt7tf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.821632 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.827873 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e13248de-b9b0-4027-bc49-e5a6ea72cf71" (UID: "e13248de-b9b0-4027-bc49-e5a6ea72cf71"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.857463 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e13248de-b9b0-4027-bc49-e5a6ea72cf71" (UID: "e13248de-b9b0-4027-bc49-e5a6ea72cf71"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.862649 4787 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.862679 4787 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.862690 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.862701 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nt7tf\" (UniqueName: \"kubernetes.io/projected/e13248de-b9b0-4027-bc49-e5a6ea72cf71-kube-api-access-nt7tf\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.862710 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.862718 4787 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e13248de-b9b0-4027-bc49-e5a6ea72cf71-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.876219 4787 generic.go:334] "Generic (PLEG): container finished" podID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" containerID="495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f" exitCode=0 Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.876294 4787 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/barbican-api-d75dcc4b8-m2d8t" event={"ID":"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5","Type":"ContainerDied","Data":"495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f"} Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.876326 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d75dcc4b8-m2d8t" event={"ID":"4fd02e5a-4fc3-46f4-b5a5-42304f4601d5","Type":"ContainerDied","Data":"981f9fc060eda2538ea1ad43ada1ca92afd841eb1a3a3377163e7b67e1e900f0"} Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.876347 4787 scope.go:117] "RemoveContainer" containerID="495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.876590 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d75dcc4b8-m2d8t" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.890704 4787 generic.go:334] "Generic (PLEG): container finished" podID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerID="302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937" exitCode=137 Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.890758 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e13248de-b9b0-4027-bc49-e5a6ea72cf71","Type":"ContainerDied","Data":"302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937"} Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.890792 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e13248de-b9b0-4027-bc49-e5a6ea72cf71","Type":"ContainerDied","Data":"17319a62c07409d42a8a80ebbdd4927a463b32fbc31640b2522725374411ae71"} Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.890863 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.891066 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-config-data" (OuterVolumeSpecName: "config-data") pod "e13248de-b9b0-4027-bc49-e5a6ea72cf71" (UID: "e13248de-b9b0-4027-bc49-e5a6ea72cf71"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.924215 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-d75dcc4b8-m2d8t"] Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.935194 4787 scope.go:117] "RemoveContainer" containerID="17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.942562 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-d75dcc4b8-m2d8t"] Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.966749 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e13248de-b9b0-4027-bc49-e5a6ea72cf71-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.988552 4787 scope.go:117] "RemoveContainer" containerID="495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f" Jan 29 13:38:08 crc kubenswrapper[4787]: E0129 13:38:08.991802 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f\": container with ID starting with 495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f not found: ID does not exist" containerID="495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.991841 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f"} err="failed to get container status \"495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f\": rpc error: code = NotFound desc = could not find container \"495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f\": container with ID starting with 495747f5d44bfce571d12f5cb292ecd7bb1bf9c876eb0bf790b1071e305a459f not found: ID does not exist" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.991876 4787 scope.go:117] "RemoveContainer" containerID="17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8" Jan 29 13:38:08 crc kubenswrapper[4787]: E0129 13:38:08.993582 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8\": container with ID starting with 17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8 not found: ID does not exist" containerID="17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.993631 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8"} err="failed to get container status \"17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8\": rpc error: code = NotFound desc = could not find container \"17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8\": container with ID starting with 17e79fe40aec0f469a349c1a63162ba61715111b8a4a8522ce464f57c47e50d8 not found: ID does not exist" Jan 29 13:38:08 crc kubenswrapper[4787]: I0129 13:38:08.993669 4787 scope.go:117] "RemoveContainer" containerID="302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.043702 4787 scope.go:117] 
"RemoveContainer" containerID="1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.088789 4787 scope.go:117] "RemoveContainer" containerID="d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.134936 4787 scope.go:117] "RemoveContainer" containerID="302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937" Jan 29 13:38:09 crc kubenswrapper[4787]: E0129 13:38:09.135412 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937\": container with ID starting with 302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937 not found: ID does not exist" containerID="302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.135438 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937"} err="failed to get container status \"302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937\": rpc error: code = NotFound desc = could not find container \"302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937\": container with ID starting with 302f43f87c32eb7828e3cd9179dd415cecccadcc31f55b064b234478eb07f937 not found: ID does not exist" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.135478 4787 scope.go:117] "RemoveContainer" containerID="1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2" Jan 29 13:38:09 crc kubenswrapper[4787]: E0129 13:38:09.135897 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2\": container with ID starting with 1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2 not found: ID does not exist" containerID="1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.135914 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2"} err="failed to get container status \"1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2\": rpc error: code = NotFound desc = could not find container \"1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2\": container with ID starting with 1b9200bdc292fc6051b6b5df2a5e432f6df88ade272ed76b7268d1509bb17fc2 not found: ID does not exist" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.135934 4787 scope.go:117] "RemoveContainer" containerID="d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534" Jan 29 13:38:09 crc kubenswrapper[4787]: E0129 13:38:09.140951 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534\": container with ID starting with d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534 not found: ID does not exist" containerID="d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.140984 4787 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534"} err="failed to get container status \"d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534\": rpc error: code = NotFound desc = could not find container \"d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534\": container with ID starting with d37f798be5251754538820c68f4be8be3574f2cb42a2bdbfab81018ea3f7f534 not found: ID does not exist" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.280537 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.305227 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.333509 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:09 crc kubenswrapper[4787]: E0129 13:38:09.333953 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="sg-core" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.333968 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="sg-core" Jan 29 13:38:09 crc kubenswrapper[4787]: E0129 13:38:09.333993 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="proxy-httpd" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.334001 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="proxy-httpd" Jan 29 13:38:09 crc kubenswrapper[4787]: E0129 13:38:09.334013 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="ceilometer-notification-agent" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.334022 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="ceilometer-notification-agent" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.334228 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="proxy-httpd" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.334251 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="ceilometer-notification-agent" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.334266 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" containerName="sg-core" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.336298 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.339788 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.339906 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.343308 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.460370 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-77bffb9b6f-5z6t5"] Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.482483 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-run-httpd\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.482717 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-log-httpd\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.482869 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.483020 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-scripts\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.483271 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.483482 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvzpg\" (UniqueName: \"kubernetes.io/projected/5628fc46-1cfb-43f9-b296-92e08d847619-kube-api-access-cvzpg\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.483945 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-config-data\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.585493 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-config-data\") 
pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.585914 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-run-httpd\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.585942 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-log-httpd\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.586003 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.586038 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-scripts\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.586079 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.586123 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvzpg\" (UniqueName: \"kubernetes.io/projected/5628fc46-1cfb-43f9-b296-92e08d847619-kube-api-access-cvzpg\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.586383 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-run-httpd\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.586397 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-log-httpd\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.591655 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.592241 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-config-data\") pod \"ceilometer-0\" (UID: 
\"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.592969 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-scripts\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.599768 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.604964 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvzpg\" (UniqueName: \"kubernetes.io/projected/5628fc46-1cfb-43f9-b296-92e08d847619-kube-api-access-cvzpg\") pod \"ceilometer-0\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.662079 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.908830 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9df3779-71e8-4441-a410-d4fe2fb2267e","Type":"ContainerStarted","Data":"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34"} Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.909177 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9df3779-71e8-4441-a410-d4fe2fb2267e","Type":"ContainerStarted","Data":"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971"} Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.913621 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" event={"ID":"ec204729-6346-4c3a-a479-2a2aa58eb3bc","Type":"ContainerStarted","Data":"590a7dfe28927af5962f958eb03c0de73f3909c4b35a1e96d83e4eb1b3065948"} Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.913655 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" event={"ID":"ec204729-6346-4c3a-a479-2a2aa58eb3bc","Type":"ContainerStarted","Data":"341921462c367e30f02fe0fa0f1eff5bd72a7e8cc914ccf63e1d930dcb777abe"} Jan 29 13:38:09 crc kubenswrapper[4787]: I0129 13:38:09.950520 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.950442822 podStartE2EDuration="3.950442822s" podCreationTimestamp="2026-01-29 13:38:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:38:09.931718488 +0000 UTC m=+1328.692978774" watchObservedRunningTime="2026-01-29 13:38:09.950442822 +0000 UTC m=+1328.711703198" Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.008369 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fd02e5a-4fc3-46f4-b5a5-42304f4601d5" path="/var/lib/kubelet/pods/4fd02e5a-4fc3-46f4-b5a5-42304f4601d5/volumes" Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.009047 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e13248de-b9b0-4027-bc49-e5a6ea72cf71" 
path="/var/lib/kubelet/pods/e13248de-b9b0-4027-bc49-e5a6ea72cf71/volumes" Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.146621 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:10 crc kubenswrapper[4787]: W0129 13:38:10.156600 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5628fc46_1cfb_43f9_b296_92e08d847619.slice/crio-edab95840b047fa2baab249f8749d5ae35a2f684c8705ed004aa2a90eb9a72d7 WatchSource:0}: Error finding container edab95840b047fa2baab249f8749d5ae35a2f684c8705ed004aa2a90eb9a72d7: Status 404 returned error can't find the container with id edab95840b047fa2baab249f8749d5ae35a2f684c8705ed004aa2a90eb9a72d7 Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.783670 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.931434 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerStarted","Data":"edab95840b047fa2baab249f8749d5ae35a2f684c8705ed004aa2a90eb9a72d7"} Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.933806 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" event={"ID":"ec204729-6346-4c3a-a479-2a2aa58eb3bc","Type":"ContainerStarted","Data":"0486530403f0ac601abe5de0a5af2b6b1794e8ce8171f91595d95c8ce17b47c4"} Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.934086 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.934187 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:10 crc kubenswrapper[4787]: I0129 13:38:10.967990 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" podStartSLOduration=2.967970966 podStartE2EDuration="2.967970966s" podCreationTimestamp="2026-01-29 13:38:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:38:10.953705136 +0000 UTC m=+1329.714965422" watchObservedRunningTime="2026-01-29 13:38:10.967970966 +0000 UTC m=+1329.729231242" Jan 29 13:38:11 crc kubenswrapper[4787]: I0129 13:38:11.847063 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:38:11 crc kubenswrapper[4787]: I0129 13:38:11.981636 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerStarted","Data":"143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9"} Jan 29 13:38:12 crc kubenswrapper[4787]: I0129 13:38:12.275265 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 29 13:38:14 crc kubenswrapper[4787]: I0129 13:38:14.571160 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:38:14 crc kubenswrapper[4787]: I0129 13:38:14.637799 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-587b8b9bbb-dm8wt"] Jan 29 13:38:14 crc kubenswrapper[4787]: I0129 13:38:14.638472 4787 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/neutron-587b8b9bbb-dm8wt" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerName="neutron-api" containerID="cri-o://f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11" gracePeriod=30 Jan 29 13:38:14 crc kubenswrapper[4787]: I0129 13:38:14.639169 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-587b8b9bbb-dm8wt" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerName="neutron-httpd" containerID="cri-o://b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea" gracePeriod=30 Jan 29 13:38:16 crc kubenswrapper[4787]: I0129 13:38:16.036090 4787 generic.go:334] "Generic (PLEG): container finished" podID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerID="b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea" exitCode=0 Jan 29 13:38:16 crc kubenswrapper[4787]: I0129 13:38:16.036270 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-587b8b9bbb-dm8wt" event={"ID":"ec4db889-9f8b-40e1-9127-63a9fb91dc1b","Type":"ContainerDied","Data":"b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea"} Jan 29 13:38:17 crc kubenswrapper[4787]: I0129 13:38:17.482990 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 29 13:38:18 crc kubenswrapper[4787]: I0129 13:38:18.058524 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerStarted","Data":"52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374"} Jan 29 13:38:18 crc kubenswrapper[4787]: I0129 13:38:18.060340 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7f67df75-67c0-4609-9afe-caa099a5ad1e","Type":"ContainerStarted","Data":"c36704626e89b3205c40733c06c606033c54a72189b14d4d21e965adaeaac743"} Jan 29 13:38:18 crc kubenswrapper[4787]: I0129 13:38:18.077933 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.478753596 podStartE2EDuration="14.077916606s" podCreationTimestamp="2026-01-29 13:38:04 +0000 UTC" firstStartedPulling="2026-01-29 13:38:05.131109515 +0000 UTC m=+1323.892369801" lastFinishedPulling="2026-01-29 13:38:17.730272535 +0000 UTC m=+1336.491532811" observedRunningTime="2026-01-29 13:38:18.07459514 +0000 UTC m=+1336.835855426" watchObservedRunningTime="2026-01-29 13:38:18.077916606 +0000 UTC m=+1336.839176892" Jan 29 13:38:18 crc kubenswrapper[4787]: I0129 13:38:18.831354 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:18 crc kubenswrapper[4787]: I0129 13:38:18.832117 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.069601 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerStarted","Data":"32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4"} Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.831484 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.967259 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-combined-ca-bundle\") pod \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.967392 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-httpd-config\") pod \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.967500 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44rrm\" (UniqueName: \"kubernetes.io/projected/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-kube-api-access-44rrm\") pod \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.967640 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-ovndb-tls-certs\") pod \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.967698 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-config\") pod \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\" (UID: \"ec4db889-9f8b-40e1-9127-63a9fb91dc1b\") " Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.979284 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "ec4db889-9f8b-40e1-9127-63a9fb91dc1b" (UID: "ec4db889-9f8b-40e1-9127-63a9fb91dc1b"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:19 crc kubenswrapper[4787]: I0129 13:38:19.980037 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-kube-api-access-44rrm" (OuterVolumeSpecName: "kube-api-access-44rrm") pod "ec4db889-9f8b-40e1-9127-63a9fb91dc1b" (UID: "ec4db889-9f8b-40e1-9127-63a9fb91dc1b"). InnerVolumeSpecName "kube-api-access-44rrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.040772 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-config" (OuterVolumeSpecName: "config") pod "ec4db889-9f8b-40e1-9127-63a9fb91dc1b" (UID: "ec4db889-9f8b-40e1-9127-63a9fb91dc1b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.041814 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec4db889-9f8b-40e1-9127-63a9fb91dc1b" (UID: "ec4db889-9f8b-40e1-9127-63a9fb91dc1b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.070037 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44rrm\" (UniqueName: \"kubernetes.io/projected/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-kube-api-access-44rrm\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.070077 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.070088 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.070097 4787 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.084595 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "ec4db889-9f8b-40e1-9127-63a9fb91dc1b" (UID: "ec4db889-9f8b-40e1-9127-63a9fb91dc1b"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.085198 4787 generic.go:334] "Generic (PLEG): container finished" podID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerID="f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11" exitCode=0 Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.085289 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-587b8b9bbb-dm8wt" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.123578 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-587b8b9bbb-dm8wt" event={"ID":"ec4db889-9f8b-40e1-9127-63a9fb91dc1b","Type":"ContainerDied","Data":"f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11"} Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.123638 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-587b8b9bbb-dm8wt" event={"ID":"ec4db889-9f8b-40e1-9127-63a9fb91dc1b","Type":"ContainerDied","Data":"ac240acb6aee7ff1f22cc9881204b13599dadfea4457721cf219d68e8eff9415"} Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.123661 4787 scope.go:117] "RemoveContainer" containerID="b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.149463 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-587b8b9bbb-dm8wt"] Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.152529 4787 scope.go:117] "RemoveContainer" containerID="f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.161826 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-587b8b9bbb-dm8wt"] Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.171668 4787 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec4db889-9f8b-40e1-9127-63a9fb91dc1b-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.179215 4787 scope.go:117] "RemoveContainer" containerID="b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea" Jan 29 13:38:20 crc kubenswrapper[4787]: E0129 13:38:20.179979 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea\": container with ID starting with b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea not found: ID does not exist" containerID="b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.180130 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea"} err="failed to get container status \"b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea\": rpc error: code = NotFound desc = could not find container \"b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea\": container with ID starting with b5b039edb2403f25e1c0fb085e792a3714e93116d60ac7104d59444f6ef869ea not found: ID does not exist" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 13:38:20.180213 4787 scope.go:117] "RemoveContainer" containerID="f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11" Jan 29 13:38:20 crc kubenswrapper[4787]: E0129 13:38:20.180616 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11\": container with ID starting with f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11 not found: ID does not exist" containerID="f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11" Jan 29 13:38:20 crc kubenswrapper[4787]: I0129 
13:38:20.180646 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11"} err="failed to get container status \"f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11\": rpc error: code = NotFound desc = could not find container \"f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11\": container with ID starting with f90b0e4aac3703d6c01bda5809f0d437ec8962647cbf63d73f0caee093effe11 not found: ID does not exist" Jan 29 13:38:21 crc kubenswrapper[4787]: I0129 13:38:21.096931 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerStarted","Data":"116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088"} Jan 29 13:38:21 crc kubenswrapper[4787]: I0129 13:38:21.097218 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 13:38:21 crc kubenswrapper[4787]: I0129 13:38:21.097135 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="proxy-httpd" containerID="cri-o://116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088" gracePeriod=30 Jan 29 13:38:21 crc kubenswrapper[4787]: I0129 13:38:21.097072 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="ceilometer-central-agent" containerID="cri-o://143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9" gracePeriod=30 Jan 29 13:38:21 crc kubenswrapper[4787]: I0129 13:38:21.097154 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="ceilometer-notification-agent" containerID="cri-o://52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374" gracePeriod=30 Jan 29 13:38:21 crc kubenswrapper[4787]: I0129 13:38:21.099386 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="sg-core" containerID="cri-o://32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4" gracePeriod=30 Jan 29 13:38:21 crc kubenswrapper[4787]: I0129 13:38:21.120476 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.036556931 podStartE2EDuration="12.120440583s" podCreationTimestamp="2026-01-29 13:38:09 +0000 UTC" firstStartedPulling="2026-01-29 13:38:10.159414803 +0000 UTC m=+1328.920675079" lastFinishedPulling="2026-01-29 13:38:20.243298455 +0000 UTC m=+1339.004558731" observedRunningTime="2026-01-29 13:38:21.1149165 +0000 UTC m=+1339.876176786" watchObservedRunningTime="2026-01-29 13:38:21.120440583 +0000 UTC m=+1339.881700859" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.001747 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" path="/var/lib/kubelet/pods/ec4db889-9f8b-40e1-9127-63a9fb91dc1b/volumes" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.115341 4787 generic.go:334] "Generic (PLEG): container finished" podID="5628fc46-1cfb-43f9-b296-92e08d847619" containerID="116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088" exitCode=0 Jan 29 13:38:22 crc 
kubenswrapper[4787]: I0129 13:38:22.115377 4787 generic.go:334] "Generic (PLEG): container finished" podID="5628fc46-1cfb-43f9-b296-92e08d847619" containerID="32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4" exitCode=2 Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.115391 4787 generic.go:334] "Generic (PLEG): container finished" podID="5628fc46-1cfb-43f9-b296-92e08d847619" containerID="52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374" exitCode=0 Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.115406 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerDied","Data":"116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088"} Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.115445 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerDied","Data":"32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4"} Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.115475 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerDied","Data":"52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374"} Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.660920 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.713503 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-log-httpd\") pod \"5628fc46-1cfb-43f9-b296-92e08d847619\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.714082 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-combined-ca-bundle\") pod \"5628fc46-1cfb-43f9-b296-92e08d847619\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.714238 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-run-httpd\") pod \"5628fc46-1cfb-43f9-b296-92e08d847619\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.714399 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-config-data\") pod \"5628fc46-1cfb-43f9-b296-92e08d847619\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.714437 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5628fc46-1cfb-43f9-b296-92e08d847619" (UID: "5628fc46-1cfb-43f9-b296-92e08d847619"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.714606 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5628fc46-1cfb-43f9-b296-92e08d847619" (UID: "5628fc46-1cfb-43f9-b296-92e08d847619"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.714641 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvzpg\" (UniqueName: \"kubernetes.io/projected/5628fc46-1cfb-43f9-b296-92e08d847619-kube-api-access-cvzpg\") pod \"5628fc46-1cfb-43f9-b296-92e08d847619\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.714811 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-sg-core-conf-yaml\") pod \"5628fc46-1cfb-43f9-b296-92e08d847619\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.714877 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-scripts\") pod \"5628fc46-1cfb-43f9-b296-92e08d847619\" (UID: \"5628fc46-1cfb-43f9-b296-92e08d847619\") " Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.715877 4787 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.715906 4787 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5628fc46-1cfb-43f9-b296-92e08d847619-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.719159 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-scripts" (OuterVolumeSpecName: "scripts") pod "5628fc46-1cfb-43f9-b296-92e08d847619" (UID: "5628fc46-1cfb-43f9-b296-92e08d847619"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.719648 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5628fc46-1cfb-43f9-b296-92e08d847619-kube-api-access-cvzpg" (OuterVolumeSpecName: "kube-api-access-cvzpg") pod "5628fc46-1cfb-43f9-b296-92e08d847619" (UID: "5628fc46-1cfb-43f9-b296-92e08d847619"). InnerVolumeSpecName "kube-api-access-cvzpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.765803 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5628fc46-1cfb-43f9-b296-92e08d847619" (UID: "5628fc46-1cfb-43f9-b296-92e08d847619"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.804611 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5628fc46-1cfb-43f9-b296-92e08d847619" (UID: "5628fc46-1cfb-43f9-b296-92e08d847619"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.817278 4787 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.817312 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.817327 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.817341 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvzpg\" (UniqueName: \"kubernetes.io/projected/5628fc46-1cfb-43f9-b296-92e08d847619-kube-api-access-cvzpg\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.822719 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-config-data" (OuterVolumeSpecName: "config-data") pod "5628fc46-1cfb-43f9-b296-92e08d847619" (UID: "5628fc46-1cfb-43f9-b296-92e08d847619"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:22 crc kubenswrapper[4787]: I0129 13:38:22.918845 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5628fc46-1cfb-43f9-b296-92e08d847619-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.126087 4787 generic.go:334] "Generic (PLEG): container finished" podID="5628fc46-1cfb-43f9-b296-92e08d847619" containerID="143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9" exitCode=0 Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.126144 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.126145 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerDied","Data":"143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9"} Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.126301 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5628fc46-1cfb-43f9-b296-92e08d847619","Type":"ContainerDied","Data":"edab95840b047fa2baab249f8749d5ae35a2f684c8705ed004aa2a90eb9a72d7"} Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.126324 4787 scope.go:117] "RemoveContainer" containerID="116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.149663 4787 scope.go:117] "RemoveContainer" containerID="32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.164760 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.175056 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187115 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.187562 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="sg-core" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187584 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="sg-core" Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.187623 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="ceilometer-notification-agent" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187639 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="ceilometer-notification-agent" Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.187656 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerName="neutron-httpd" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187664 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerName="neutron-httpd" Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.187676 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="proxy-httpd" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187682 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="proxy-httpd" Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.187699 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="ceilometer-central-agent" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187706 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="ceilometer-central-agent" Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.187729 4787 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerName="neutron-api" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187736 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerName="neutron-api" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187885 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="proxy-httpd" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187899 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerName="neutron-api" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187911 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="ceilometer-notification-agent" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187923 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="ceilometer-central-agent" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187936 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec4db889-9f8b-40e1-9127-63a9fb91dc1b" containerName="neutron-httpd" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.187949 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" containerName="sg-core" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.192682 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.193477 4787 scope.go:117] "RemoveContainer" containerID="52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.197923 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.199631 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.214824 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.224597 4787 scope.go:117] "RemoveContainer" containerID="143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9" Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.286522 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-65b45"] Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.287807 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.293868 4787 scope.go:117] "RemoveContainer" containerID="116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088"
Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.295349 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088\": container with ID starting with 116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088 not found: ID does not exist" containerID="116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.295414 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088"} err="failed to get container status \"116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088\": rpc error: code = NotFound desc = could not find container \"116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088\": container with ID starting with 116b96b2860a794397b40efdb96bf0a7443e018923132aea5d7645dd1272c088 not found: ID does not exist"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.295444 4787 scope.go:117] "RemoveContainer" containerID="32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4"
Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.295717 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4\": container with ID starting with 32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4 not found: ID does not exist" containerID="32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.295744 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4"} err="failed to get container status \"32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4\": rpc error: code = NotFound desc = could not find container \"32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4\": container with ID starting with 32f7c476236fcd288816503fb5ff4e036902ac7d136080efac5201b7bc7a0ee4 not found: ID does not exist"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.295761 4787 scope.go:117] "RemoveContainer" containerID="52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374"
Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.296620 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374\": container with ID starting with 52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374 not found: ID does not exist" containerID="52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.296642 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374"} err="failed to get container status \"52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374\": rpc error: code = NotFound desc = could not find container \"52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374\": container with ID starting with 52dee55e9acf875aebacc7a5e744c01d5dc58070c69f419cc24343708b1e0374 not found: ID does not exist"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.296656 4787 scope.go:117] "RemoveContainer" containerID="143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9"
Jan 29 13:38:23 crc kubenswrapper[4787]: E0129 13:38:23.296914 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9\": container with ID starting with 143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9 not found: ID does not exist" containerID="143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.296946 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9"} err="failed to get container status \"143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9\": rpc error: code = NotFound desc = could not find container \"143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9\": container with ID starting with 143d38e47408bb3615b75ccec7c6ecf3257b94f63680fc6e73e5542399b84bb9 not found: ID does not exist"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.312485 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-65b45"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.327036 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-run-httpd\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.327101 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-config-data\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.327129 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-log-httpd\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.327151 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.327179 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgc2q\" (UniqueName: \"kubernetes.io/projected/f5a3f598-761b-4f31-b44d-276f53f9ff54-kube-api-access-zgc2q\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.327204 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.327243 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-scripts\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.375445 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-24xfm"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.376528 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.388224 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-24xfm"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428706 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-run-httpd\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428779 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/558f12be-14a0-43bb-9f88-98f6ddafa81f-operator-scripts\") pod \"nova-api-db-create-65b45\" (UID: \"558f12be-14a0-43bb-9f88-98f6ddafa81f\") " pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428820 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-config-data\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428852 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-log-httpd\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428882 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428906 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq2zv\" (UniqueName: \"kubernetes.io/projected/558f12be-14a0-43bb-9f88-98f6ddafa81f-kube-api-access-xq2zv\") pod \"nova-api-db-create-65b45\" (UID: \"558f12be-14a0-43bb-9f88-98f6ddafa81f\") " pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428929 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgc2q\" (UniqueName: \"kubernetes.io/projected/f5a3f598-761b-4f31-b44d-276f53f9ff54-kube-api-access-zgc2q\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428955 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.428996 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-scripts\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.436328 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-scripts\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.436635 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-run-httpd\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.441394 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-log-httpd\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.441419 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-config-data\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.446055 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.449405 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.465512 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgc2q\" (UniqueName: \"kubernetes.io/projected/f5a3f598-761b-4f31-b44d-276f53f9ff54-kube-api-access-zgc2q\") pod \"ceilometer-0\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") " pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.482471 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-e613-account-create-update-2fvc2"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.485164 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.487961 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.496674 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e613-account-create-update-2fvc2"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.530329 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/558f12be-14a0-43bb-9f88-98f6ddafa81f-operator-scripts\") pod \"nova-api-db-create-65b45\" (UID: \"558f12be-14a0-43bb-9f88-98f6ddafa81f\") " pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.530681 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq2zv\" (UniqueName: \"kubernetes.io/projected/558f12be-14a0-43bb-9f88-98f6ddafa81f-kube-api-access-xq2zv\") pod \"nova-api-db-create-65b45\" (UID: \"558f12be-14a0-43bb-9f88-98f6ddafa81f\") " pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.530724 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-operator-scripts\") pod \"nova-cell0-db-create-24xfm\" (UID: \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\") " pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.530781 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g47ss\" (UniqueName: \"kubernetes.io/projected/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-kube-api-access-g47ss\") pod \"nova-cell0-db-create-24xfm\" (UID: \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\") " pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.542109 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/558f12be-14a0-43bb-9f88-98f6ddafa81f-operator-scripts\") pod \"nova-api-db-create-65b45\" (UID: \"558f12be-14a0-43bb-9f88-98f6ddafa81f\") " pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.556931 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq2zv\" (UniqueName: \"kubernetes.io/projected/558f12be-14a0-43bb-9f88-98f6ddafa81f-kube-api-access-xq2zv\") pod \"nova-api-db-create-65b45\" (UID: \"558f12be-14a0-43bb-9f88-98f6ddafa81f\") " pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.561101 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.618828 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.637785 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7df2fd4-19d7-4610-ad5a-9738f142c562-operator-scripts\") pod \"nova-api-e613-account-create-update-2fvc2\" (UID: \"a7df2fd4-19d7-4610-ad5a-9738f142c562\") " pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.637879 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-operator-scripts\") pod \"nova-cell0-db-create-24xfm\" (UID: \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\") " pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.637912 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g47ss\" (UniqueName: \"kubernetes.io/projected/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-kube-api-access-g47ss\") pod \"nova-cell0-db-create-24xfm\" (UID: \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\") " pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.637932 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5mz9\" (UniqueName: \"kubernetes.io/projected/a7df2fd4-19d7-4610-ad5a-9738f142c562-kube-api-access-q5mz9\") pod \"nova-api-e613-account-create-update-2fvc2\" (UID: \"a7df2fd4-19d7-4610-ad5a-9738f142c562\") " pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.638625 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-operator-scripts\") pod \"nova-cell0-db-create-24xfm\" (UID: \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\") " pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.711646 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g47ss\" (UniqueName: \"kubernetes.io/projected/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-kube-api-access-g47ss\") pod \"nova-cell0-db-create-24xfm\" (UID: \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\") " pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.727004 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.740560 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5mz9\" (UniqueName: \"kubernetes.io/projected/a7df2fd4-19d7-4610-ad5a-9738f142c562-kube-api-access-q5mz9\") pod \"nova-api-e613-account-create-update-2fvc2\" (UID: \"a7df2fd4-19d7-4610-ad5a-9738f142c562\") " pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.740694 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7df2fd4-19d7-4610-ad5a-9738f142c562-operator-scripts\") pod \"nova-api-e613-account-create-update-2fvc2\" (UID: \"a7df2fd4-19d7-4610-ad5a-9738f142c562\") " pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.741691 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7df2fd4-19d7-4610-ad5a-9738f142c562-operator-scripts\") pod \"nova-api-e613-account-create-update-2fvc2\" (UID: \"a7df2fd4-19d7-4610-ad5a-9738f142c562\") " pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.759177 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-dc6vm"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.797046 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.801025 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5mz9\" (UniqueName: \"kubernetes.io/projected/a7df2fd4-19d7-4610-ad5a-9738f142c562-kube-api-access-q5mz9\") pod \"nova-api-e613-account-create-update-2fvc2\" (UID: \"a7df2fd4-19d7-4610-ad5a-9738f142c562\") " pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.801979 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dc6vm"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.810244 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-z99d7"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.814978 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.816561 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-z99d7"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.817230 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.819389 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.842003 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck6t8\" (UniqueName: \"kubernetes.io/projected/5955ac52-7d5b-4d18-95c2-c733b868af76-kube-api-access-ck6t8\") pod \"nova-cell1-db-create-dc6vm\" (UID: \"5955ac52-7d5b-4d18-95c2-c733b868af76\") " pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.842049 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5955ac52-7d5b-4d18-95c2-c733b868af76-operator-scripts\") pod \"nova-cell1-db-create-dc6vm\" (UID: \"5955ac52-7d5b-4d18-95c2-c733b868af76\") " pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.884996 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-bjp2c"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.887830 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.890133 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.892750 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-bjp2c"]
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.943648 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91b2f9a1-513a-4fe0-8319-daf196d2afd8-operator-scripts\") pod \"nova-cell0-a3c3-account-create-update-z99d7\" (UID: \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\") " pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.944052 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck6t8\" (UniqueName: \"kubernetes.io/projected/5955ac52-7d5b-4d18-95c2-c733b868af76-kube-api-access-ck6t8\") pod \"nova-cell1-db-create-dc6vm\" (UID: \"5955ac52-7d5b-4d18-95c2-c733b868af76\") " pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.944083 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5955ac52-7d5b-4d18-95c2-c733b868af76-operator-scripts\") pod \"nova-cell1-db-create-dc6vm\" (UID: \"5955ac52-7d5b-4d18-95c2-c733b868af76\") " pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.944169 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6scqd\" (UniqueName: \"kubernetes.io/projected/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-kube-api-access-6scqd\") pod \"nova-cell1-06dc-account-create-update-bjp2c\" (UID: \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\") " pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.944205 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-operator-scripts\") pod \"nova-cell1-06dc-account-create-update-bjp2c\" (UID: \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\") " pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.944238 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6plft\" (UniqueName: \"kubernetes.io/projected/91b2f9a1-513a-4fe0-8319-daf196d2afd8-kube-api-access-6plft\") pod \"nova-cell0-a3c3-account-create-update-z99d7\" (UID: \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\") " pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.945541 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5955ac52-7d5b-4d18-95c2-c733b868af76-operator-scripts\") pod \"nova-cell1-db-create-dc6vm\" (UID: \"5955ac52-7d5b-4d18-95c2-c733b868af76\") " pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:23 crc kubenswrapper[4787]: I0129 13:38:23.965547 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck6t8\" (UniqueName: \"kubernetes.io/projected/5955ac52-7d5b-4d18-95c2-c733b868af76-kube-api-access-ck6t8\") pod \"nova-cell1-db-create-dc6vm\" (UID: \"5955ac52-7d5b-4d18-95c2-c733b868af76\") " pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.042696 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5628fc46-1cfb-43f9-b296-92e08d847619" path="/var/lib/kubelet/pods/5628fc46-1cfb-43f9-b296-92e08d847619/volumes"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.048925 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6scqd\" (UniqueName: \"kubernetes.io/projected/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-kube-api-access-6scqd\") pod \"nova-cell1-06dc-account-create-update-bjp2c\" (UID: \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\") " pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.048999 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-operator-scripts\") pod \"nova-cell1-06dc-account-create-update-bjp2c\" (UID: \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\") " pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.049032 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6plft\" (UniqueName: \"kubernetes.io/projected/91b2f9a1-513a-4fe0-8319-daf196d2afd8-kube-api-access-6plft\") pod \"nova-cell0-a3c3-account-create-update-z99d7\" (UID: \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\") " pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.049167 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91b2f9a1-513a-4fe0-8319-daf196d2afd8-operator-scripts\") pod \"nova-cell0-a3c3-account-create-update-z99d7\" (UID: \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\") " pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.050282 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-operator-scripts\") pod \"nova-cell1-06dc-account-create-update-bjp2c\" (UID: \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\") " pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.050314 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91b2f9a1-513a-4fe0-8319-daf196d2afd8-operator-scripts\") pod \"nova-cell0-a3c3-account-create-update-z99d7\" (UID: \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\") " pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.083373 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6plft\" (UniqueName: \"kubernetes.io/projected/91b2f9a1-513a-4fe0-8319-daf196d2afd8-kube-api-access-6plft\") pod \"nova-cell0-a3c3-account-create-update-z99d7\" (UID: \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\") " pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.085965 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6scqd\" (UniqueName: \"kubernetes.io/projected/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-kube-api-access-6scqd\") pod \"nova-cell1-06dc-account-create-update-bjp2c\" (UID: \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\") " pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.127871 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.144984 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.219684 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:24 crc kubenswrapper[4787]: W0129 13:38:24.445965 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod558f12be_14a0_43bb_9f88_98f6ddafa81f.slice/crio-8ad5bf8245a3f114c44e4e34e838fcd67e045f3a3d46c86faeb9b02786e9caf0 WatchSource:0}: Error finding container 8ad5bf8245a3f114c44e4e34e838fcd67e045f3a3d46c86faeb9b02786e9caf0: Status 404 returned error can't find the container with id 8ad5bf8245a3f114c44e4e34e838fcd67e045f3a3d46c86faeb9b02786e9caf0
Jan 29 13:38:24 crc kubenswrapper[4787]: W0129 13:38:24.449594 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5a3f598_761b_4f31_b44d_276f53f9ff54.slice/crio-953605f575348a803f01562a714c2df429420fa6fa1047723136f1c079fec937 WatchSource:0}: Error finding container 953605f575348a803f01562a714c2df429420fa6fa1047723136f1c079fec937: Status 404 returned error can't find the container with id 953605f575348a803f01562a714c2df429420fa6fa1047723136f1c079fec937
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.452183 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.471167 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-65b45"]
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.590548 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-24xfm"]
Jan 29 13:38:24 crc kubenswrapper[4787]: W0129 13:38:24.600341 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod120db9b0_7739_4e28_ba21_4f8bedf3a8d8.slice/crio-5b738eea9b8015fcd97a6e47ad8a0c39b226a2351453640044d698562c9caf34 WatchSource:0}: Error finding container 5b738eea9b8015fcd97a6e47ad8a0c39b226a2351453640044d698562c9caf34: Status 404 returned error can't find the container with id 5b738eea9b8015fcd97a6e47ad8a0c39b226a2351453640044d698562c9caf34
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.745125 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e613-account-create-update-2fvc2"]
Jan 29 13:38:24 crc kubenswrapper[4787]: W0129 13:38:24.763013 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7df2fd4_19d7_4610_ad5a_9738f142c562.slice/crio-0bf5f46cac8a5393bd5da593b834a78ccd70b7d60e307fcca488fb56a012085e WatchSource:0}: Error finding container 0bf5f46cac8a5393bd5da593b834a78ccd70b7d60e307fcca488fb56a012085e: Status 404 returned error can't find the container with id 0bf5f46cac8a5393bd5da593b834a78ccd70b7d60e307fcca488fb56a012085e
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.879544 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dc6vm"]
Jan 29 13:38:24 crc kubenswrapper[4787]: W0129 13:38:24.889494 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5955ac52_7d5b_4d18_95c2_c733b868af76.slice/crio-e5b38551e32a2cd9e00c2f95de7a47833b844ce12a03c8251b8f10d8ecd94ec0 WatchSource:0}: Error finding container e5b38551e32a2cd9e00c2f95de7a47833b844ce12a03c8251b8f10d8ecd94ec0: Status 404 returned error can't find the container with id e5b38551e32a2cd9e00c2f95de7a47833b844ce12a03c8251b8f10d8ecd94ec0
Jan 29 13:38:24 crc kubenswrapper[4787]: I0129 13:38:24.894340 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-z99d7"]
Jan 29 13:38:24 crc kubenswrapper[4787]: W0129 13:38:24.897445 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91b2f9a1_513a_4fe0_8319_daf196d2afd8.slice/crio-52eab1f0e032406f8f6bb1adcd45cd94447aee791a54f331ce3f51c8d85ab37a WatchSource:0}: Error finding container 52eab1f0e032406f8f6bb1adcd45cd94447aee791a54f331ce3f51c8d85ab37a: Status 404 returned error can't find the container with id 52eab1f0e032406f8f6bb1adcd45cd94447aee791a54f331ce3f51c8d85ab37a
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.033741 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-bjp2c"]
Jan 29 13:38:25 crc kubenswrapper[4787]: W0129 13:38:25.047325 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod432d7a0e_772b_4a31_907f_d3c5b9bfe12a.slice/crio-d832c924a9853fa2b32315dc7c47aedbb8dad81d2fc4c335471f2fffd03d31ab WatchSource:0}: Error finding container d832c924a9853fa2b32315dc7c47aedbb8dad81d2fc4c335471f2fffd03d31ab: Status 404 returned error can't find the container with id d832c924a9853fa2b32315dc7c47aedbb8dad81d2fc4c335471f2fffd03d31ab
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.175271 4787 generic.go:334] "Generic (PLEG): container finished" podID="120db9b0-7739-4e28-ba21-4f8bedf3a8d8" containerID="d66315da679b94f0778bd7588ad6dc7fdc2f5546aee3df91a887d454cd5b7ba2" exitCode=0
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.175336 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-24xfm" event={"ID":"120db9b0-7739-4e28-ba21-4f8bedf3a8d8","Type":"ContainerDied","Data":"d66315da679b94f0778bd7588ad6dc7fdc2f5546aee3df91a887d454cd5b7ba2"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.175364 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-24xfm" event={"ID":"120db9b0-7739-4e28-ba21-4f8bedf3a8d8","Type":"ContainerStarted","Data":"5b738eea9b8015fcd97a6e47ad8a0c39b226a2351453640044d698562c9caf34"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.176885 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7" event={"ID":"91b2f9a1-513a-4fe0-8319-daf196d2afd8","Type":"ContainerStarted","Data":"b5889b0839f03de550aac1072fe9346c8f6283198062869bde3b352c8aa8ad03"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.176932 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7" event={"ID":"91b2f9a1-513a-4fe0-8319-daf196d2afd8","Type":"ContainerStarted","Data":"52eab1f0e032406f8f6bb1adcd45cd94447aee791a54f331ce3f51c8d85ab37a"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.178067 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-06dc-account-create-update-bjp2c" event={"ID":"432d7a0e-772b-4a31-907f-d3c5b9bfe12a","Type":"ContainerStarted","Data":"d832c924a9853fa2b32315dc7c47aedbb8dad81d2fc4c335471f2fffd03d31ab"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.179468 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dc6vm" event={"ID":"5955ac52-7d5b-4d18-95c2-c733b868af76","Type":"ContainerStarted","Data":"dd512a8155b6324cad09bf4250a8dc3eb9e30fa7e84be8ddea6199714e1a72ab"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.179498 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dc6vm" event={"ID":"5955ac52-7d5b-4d18-95c2-c733b868af76","Type":"ContainerStarted","Data":"e5b38551e32a2cd9e00c2f95de7a47833b844ce12a03c8251b8f10d8ecd94ec0"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.182503 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e613-account-create-update-2fvc2" event={"ID":"a7df2fd4-19d7-4610-ad5a-9738f142c562","Type":"ContainerStarted","Data":"36e79d43872b82fc3af46ec38ec2f9486c3bd213dd9ce4372c2b33fd42a387e4"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.182547 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e613-account-create-update-2fvc2" event={"ID":"a7df2fd4-19d7-4610-ad5a-9738f142c562","Type":"ContainerStarted","Data":"0bf5f46cac8a5393bd5da593b834a78ccd70b7d60e307fcca488fb56a012085e"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.185543 4787 generic.go:334] "Generic (PLEG): container finished" podID="558f12be-14a0-43bb-9f88-98f6ddafa81f" containerID="8b95125a5a58499681bf23ee00e08e6866abcec3495ceb7c8a8bc0f7ba0498e2" exitCode=0
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.185620 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-65b45" event={"ID":"558f12be-14a0-43bb-9f88-98f6ddafa81f","Type":"ContainerDied","Data":"8b95125a5a58499681bf23ee00e08e6866abcec3495ceb7c8a8bc0f7ba0498e2"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.185648 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-65b45" event={"ID":"558f12be-14a0-43bb-9f88-98f6ddafa81f","Type":"ContainerStarted","Data":"8ad5bf8245a3f114c44e4e34e838fcd67e045f3a3d46c86faeb9b02786e9caf0"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.187534 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerStarted","Data":"953605f575348a803f01562a714c2df429420fa6fa1047723136f1c079fec937"}
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.240798 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-dc6vm" podStartSLOduration=2.240782115 podStartE2EDuration="2.240782115s" podCreationTimestamp="2026-01-29 13:38:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:38:25.23784956 +0000 UTC m=+1343.999109836" watchObservedRunningTime="2026-01-29 13:38:25.240782115 +0000 UTC m=+1344.002042391"
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.245082 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7" podStartSLOduration=2.245066056 podStartE2EDuration="2.245066056s" podCreationTimestamp="2026-01-29 13:38:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:38:25.218282293 +0000 UTC m=+1343.979542569" watchObservedRunningTime="2026-01-29 13:38:25.245066056 +0000 UTC m=+1344.006326332"
Jan 29 13:38:25 crc kubenswrapper[4787]: I0129 13:38:25.262085 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-e613-account-create-update-2fvc2" podStartSLOduration=2.262069237 podStartE2EDuration="2.262069237s" podCreationTimestamp="2026-01-29 13:38:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:38:25.25871385 +0000 UTC m=+1344.019974126" watchObservedRunningTime="2026-01-29 13:38:25.262069237 +0000 UTC m=+1344.023329513"
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.209491 4787 generic.go:334] "Generic (PLEG): container finished" podID="a7df2fd4-19d7-4610-ad5a-9738f142c562" containerID="36e79d43872b82fc3af46ec38ec2f9486c3bd213dd9ce4372c2b33fd42a387e4" exitCode=0
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.209590 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e613-account-create-update-2fvc2" event={"ID":"a7df2fd4-19d7-4610-ad5a-9738f142c562","Type":"ContainerDied","Data":"36e79d43872b82fc3af46ec38ec2f9486c3bd213dd9ce4372c2b33fd42a387e4"}
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.213884 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerStarted","Data":"32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828"}
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.213936 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerStarted","Data":"505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698"}
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.215512 4787 generic.go:334] "Generic (PLEG): container finished" podID="91b2f9a1-513a-4fe0-8319-daf196d2afd8" containerID="b5889b0839f03de550aac1072fe9346c8f6283198062869bde3b352c8aa8ad03" exitCode=0
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.215564 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7" event={"ID":"91b2f9a1-513a-4fe0-8319-daf196d2afd8","Type":"ContainerDied","Data":"b5889b0839f03de550aac1072fe9346c8f6283198062869bde3b352c8aa8ad03"}
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.217105 4787 generic.go:334] "Generic (PLEG): container finished" podID="432d7a0e-772b-4a31-907f-d3c5b9bfe12a" containerID="6a1ee377dcb7020ee48e27605a40b00d9b78bd8ecb94de501fab6bd3df6c95f7" exitCode=0
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.217143 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-06dc-account-create-update-bjp2c" event={"ID":"432d7a0e-772b-4a31-907f-d3c5b9bfe12a","Type":"ContainerDied","Data":"6a1ee377dcb7020ee48e27605a40b00d9b78bd8ecb94de501fab6bd3df6c95f7"}
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.218933 4787 generic.go:334] "Generic (PLEG): container finished" podID="5955ac52-7d5b-4d18-95c2-c733b868af76" containerID="dd512a8155b6324cad09bf4250a8dc3eb9e30fa7e84be8ddea6199714e1a72ab" exitCode=0
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.219045 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dc6vm" event={"ID":"5955ac52-7d5b-4d18-95c2-c733b868af76","Type":"ContainerDied","Data":"dd512a8155b6324cad09bf4250a8dc3eb9e30fa7e84be8ddea6199714e1a72ab"}
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.790799 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.799818 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.901481 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq2zv\" (UniqueName: \"kubernetes.io/projected/558f12be-14a0-43bb-9f88-98f6ddafa81f-kube-api-access-xq2zv\") pod \"558f12be-14a0-43bb-9f88-98f6ddafa81f\" (UID: \"558f12be-14a0-43bb-9f88-98f6ddafa81f\") "
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.901538 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-operator-scripts\") pod \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\" (UID: \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\") "
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.901691 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g47ss\" (UniqueName: \"kubernetes.io/projected/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-kube-api-access-g47ss\") pod \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\" (UID: \"120db9b0-7739-4e28-ba21-4f8bedf3a8d8\") "
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.901773 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/558f12be-14a0-43bb-9f88-98f6ddafa81f-operator-scripts\") pod \"558f12be-14a0-43bb-9f88-98f6ddafa81f\" (UID: \"558f12be-14a0-43bb-9f88-98f6ddafa81f\") "
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.904554 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "120db9b0-7739-4e28-ba21-4f8bedf3a8d8" (UID: "120db9b0-7739-4e28-ba21-4f8bedf3a8d8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.906274 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/558f12be-14a0-43bb-9f88-98f6ddafa81f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "558f12be-14a0-43bb-9f88-98f6ddafa81f" (UID: "558f12be-14a0-43bb-9f88-98f6ddafa81f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.913732 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-kube-api-access-g47ss" (OuterVolumeSpecName: "kube-api-access-g47ss") pod "120db9b0-7739-4e28-ba21-4f8bedf3a8d8" (UID: "120db9b0-7739-4e28-ba21-4f8bedf3a8d8"). InnerVolumeSpecName "kube-api-access-g47ss". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:38:26 crc kubenswrapper[4787]: I0129 13:38:26.917218 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/558f12be-14a0-43bb-9f88-98f6ddafa81f-kube-api-access-xq2zv" (OuterVolumeSpecName: "kube-api-access-xq2zv") pod "558f12be-14a0-43bb-9f88-98f6ddafa81f" (UID: "558f12be-14a0-43bb-9f88-98f6ddafa81f"). InnerVolumeSpecName "kube-api-access-xq2zv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.003858 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g47ss\" (UniqueName: \"kubernetes.io/projected/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-kube-api-access-g47ss\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.003883 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/558f12be-14a0-43bb-9f88-98f6ddafa81f-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.003893 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq2zv\" (UniqueName: \"kubernetes.io/projected/558f12be-14a0-43bb-9f88-98f6ddafa81f-kube-api-access-xq2zv\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.003901 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/120db9b0-7739-4e28-ba21-4f8bedf3a8d8-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.227555 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-65b45" event={"ID":"558f12be-14a0-43bb-9f88-98f6ddafa81f","Type":"ContainerDied","Data":"8ad5bf8245a3f114c44e4e34e838fcd67e045f3a3d46c86faeb9b02786e9caf0"}
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.227641 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ad5bf8245a3f114c44e4e34e838fcd67e045f3a3d46c86faeb9b02786e9caf0"
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.227575 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-65b45"
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.229917 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerStarted","Data":"b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652"}
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.231663 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-24xfm" event={"ID":"120db9b0-7739-4e28-ba21-4f8bedf3a8d8","Type":"ContainerDied","Data":"5b738eea9b8015fcd97a6e47ad8a0c39b226a2351453640044d698562c9caf34"}
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.231708 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b738eea9b8015fcd97a6e47ad8a0c39b226a2351453640044d698562c9caf34"
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.231842 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-24xfm"
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.849440 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.857046 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.865350 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.876837 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.918118 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5955ac52-7d5b-4d18-95c2-c733b868af76-operator-scripts\") pod \"5955ac52-7d5b-4d18-95c2-c733b868af76\" (UID: \"5955ac52-7d5b-4d18-95c2-c733b868af76\") "
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.918155 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91b2f9a1-513a-4fe0-8319-daf196d2afd8-operator-scripts\") pod \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\" (UID: \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\") "
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.918231 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-operator-scripts\") pod \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\" (UID: \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\") "
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.918336 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7df2fd4-19d7-4610-ad5a-9738f142c562-operator-scripts\") pod \"a7df2fd4-19d7-4610-ad5a-9738f142c562\" (UID: \"a7df2fd4-19d7-4610-ad5a-9738f142c562\") "
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.918362 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6scqd\" (UniqueName: \"kubernetes.io/projected/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-kube-api-access-6scqd\") pod \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\" (UID: \"432d7a0e-772b-4a31-907f-d3c5b9bfe12a\") "
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.918398 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6plft\" (UniqueName: \"kubernetes.io/projected/91b2f9a1-513a-4fe0-8319-daf196d2afd8-kube-api-access-6plft\") pod \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\" (UID: \"91b2f9a1-513a-4fe0-8319-daf196d2afd8\") "
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.918439 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5mz9\" (UniqueName: \"kubernetes.io/projected/a7df2fd4-19d7-4610-ad5a-9738f142c562-kube-api-access-q5mz9\") pod \"a7df2fd4-19d7-4610-ad5a-9738f142c562\" (UID: \"a7df2fd4-19d7-4610-ad5a-9738f142c562\") "
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.918500 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ck6t8\" (UniqueName: \"kubernetes.io/projected/5955ac52-7d5b-4d18-95c2-c733b868af76-kube-api-access-ck6t8\") pod \"5955ac52-7d5b-4d18-95c2-c733b868af76\" (UID: \"5955ac52-7d5b-4d18-95c2-c733b868af76\") "
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.919240 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91b2f9a1-513a-4fe0-8319-daf196d2afd8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "91b2f9a1-513a-4fe0-8319-daf196d2afd8" (UID: "91b2f9a1-513a-4fe0-8319-daf196d2afd8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.919593 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7df2fd4-19d7-4610-ad5a-9738f142c562-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a7df2fd4-19d7-4610-ad5a-9738f142c562" (UID: "a7df2fd4-19d7-4610-ad5a-9738f142c562"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.919609 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5955ac52-7d5b-4d18-95c2-c733b868af76-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5955ac52-7d5b-4d18-95c2-c733b868af76" (UID: "5955ac52-7d5b-4d18-95c2-c733b868af76"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.919781 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "432d7a0e-772b-4a31-907f-d3c5b9bfe12a" (UID: "432d7a0e-772b-4a31-907f-d3c5b9bfe12a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.927978 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7df2fd4-19d7-4610-ad5a-9738f142c562-kube-api-access-q5mz9" (OuterVolumeSpecName: "kube-api-access-q5mz9") pod "a7df2fd4-19d7-4610-ad5a-9738f142c562" (UID: "a7df2fd4-19d7-4610-ad5a-9738f142c562"). InnerVolumeSpecName "kube-api-access-q5mz9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.928072 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91b2f9a1-513a-4fe0-8319-daf196d2afd8-kube-api-access-6plft" (OuterVolumeSpecName: "kube-api-access-6plft") pod "91b2f9a1-513a-4fe0-8319-daf196d2afd8" (UID: "91b2f9a1-513a-4fe0-8319-daf196d2afd8"). InnerVolumeSpecName "kube-api-access-6plft". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.928129 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5955ac52-7d5b-4d18-95c2-c733b868af76-kube-api-access-ck6t8" (OuterVolumeSpecName: "kube-api-access-ck6t8") pod "5955ac52-7d5b-4d18-95c2-c733b868af76" (UID: "5955ac52-7d5b-4d18-95c2-c733b868af76"). InnerVolumeSpecName "kube-api-access-ck6t8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:38:27 crc kubenswrapper[4787]: I0129 13:38:27.929726 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-kube-api-access-6scqd" (OuterVolumeSpecName: "kube-api-access-6scqd") pod "432d7a0e-772b-4a31-907f-d3c5b9bfe12a" (UID: "432d7a0e-772b-4a31-907f-d3c5b9bfe12a"). InnerVolumeSpecName "kube-api-access-6scqd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.022111 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5mz9\" (UniqueName: \"kubernetes.io/projected/a7df2fd4-19d7-4610-ad5a-9738f142c562-kube-api-access-q5mz9\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.022167 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ck6t8\" (UniqueName: \"kubernetes.io/projected/5955ac52-7d5b-4d18-95c2-c733b868af76-kube-api-access-ck6t8\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.022179 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5955ac52-7d5b-4d18-95c2-c733b868af76-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.022188 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91b2f9a1-513a-4fe0-8319-daf196d2afd8-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.022267 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.022279 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7df2fd4-19d7-4610-ad5a-9738f142c562-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.022288 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6scqd\" (UniqueName: \"kubernetes.io/projected/432d7a0e-772b-4a31-907f-d3c5b9bfe12a-kube-api-access-6scqd\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.022296 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6plft\" (UniqueName: \"kubernetes.io/projected/91b2f9a1-513a-4fe0-8319-daf196d2afd8-kube-api-access-6plft\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.242896 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-06dc-account-create-update-bjp2c" event={"ID":"432d7a0e-772b-4a31-907f-d3c5b9bfe12a","Type":"ContainerDied","Data":"d832c924a9853fa2b32315dc7c47aedbb8dad81d2fc4c335471f2fffd03d31ab"}
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.242942 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d832c924a9853fa2b32315dc7c47aedbb8dad81d2fc4c335471f2fffd03d31ab"
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.243019 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-06dc-account-create-update-bjp2c"
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.245665 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dc6vm" event={"ID":"5955ac52-7d5b-4d18-95c2-c733b868af76","Type":"ContainerDied","Data":"e5b38551e32a2cd9e00c2f95de7a47833b844ce12a03c8251b8f10d8ecd94ec0"}
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.245699 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5b38551e32a2cd9e00c2f95de7a47833b844ce12a03c8251b8f10d8ecd94ec0"
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.245774 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dc6vm"
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.247943 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e613-account-create-update-2fvc2" event={"ID":"a7df2fd4-19d7-4610-ad5a-9738f142c562","Type":"ContainerDied","Data":"0bf5f46cac8a5393bd5da593b834a78ccd70b7d60e307fcca488fb56a012085e"}
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.247974 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0bf5f46cac8a5393bd5da593b834a78ccd70b7d60e307fcca488fb56a012085e"
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.248035 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e613-account-create-update-2fvc2"
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.250183 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7" event={"ID":"91b2f9a1-513a-4fe0-8319-daf196d2afd8","Type":"ContainerDied","Data":"52eab1f0e032406f8f6bb1adcd45cd94447aee791a54f331ce3f51c8d85ab37a"}
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.250214 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52eab1f0e032406f8f6bb1adcd45cd94447aee791a54f331ce3f51c8d85ab37a"
Jan 29 13:38:28 crc kubenswrapper[4787]: I0129 13:38:28.250277 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a3c3-account-create-update-z99d7"
Jan 29 13:38:29 crc kubenswrapper[4787]: I0129 13:38:29.841877 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 13:38:29 crc kubenswrapper[4787]: I0129 13:38:29.842488 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerName="glance-log" containerID="cri-o://be8fd3f509d83fcdfe98d0989d9c77781b4706d3e3e97d9679f5768c5b68ac7d" gracePeriod=30
Jan 29 13:38:29 crc kubenswrapper[4787]: I0129 13:38:29.842539 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerName="glance-httpd" containerID="cri-o://e7a3e3f5e79aa2de71737f592e1a4e16b7041628ae25cf8906f5f5ab0a696d5d" gracePeriod=30
Jan 29 13:38:30 crc kubenswrapper[4787]: I0129 13:38:30.266114 4787 generic.go:334] "Generic (PLEG): container finished" podID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerID="be8fd3f509d83fcdfe98d0989d9c77781b4706d3e3e97d9679f5768c5b68ac7d" exitCode=143
Jan 29 13:38:30 crc kubenswrapper[4787]: I0129 13:38:30.266155 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a1d921a8-744d-46fd-b3be-7e79be2532b5","Type":"ContainerDied","Data":"be8fd3f509d83fcdfe98d0989d9c77781b4706d3e3e97d9679f5768c5b68ac7d"}
Jan 29 13:38:30 crc kubenswrapper[4787]: I0129 13:38:30.851162 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 13:38:31 crc kubenswrapper[4787]: I0129 13:38:31.820058 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:38:31 crc kubenswrapper[4787]: I0129 13:38:31.820610 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" containerName="glance-log" containerID="cri-o://e9d435265c2eb8d397b6eca29e5c7936a7d76f638d7ae2f619c6266bf81fb41a" gracePeriod=30
Jan 29 13:38:31 crc kubenswrapper[4787]: I0129 13:38:31.820716 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" containerName="glance-httpd" containerID="cri-o://e1b53c5eb86cba39eede1437beceb0141e4f598736c4bedefcaa3544df888e01" gracePeriod=30
Jan 29 13:38:32 crc kubenswrapper[4787]: I0129 13:38:32.285161 4787 generic.go:334] "Generic (PLEG): container finished" podID="2231985a-9fb4-4ada-8d50-f35907760eab" containerID="e9d435265c2eb8d397b6eca29e5c7936a7d76f638d7ae2f619c6266bf81fb41a" exitCode=143
Jan 29 13:38:32 crc kubenswrapper[4787]: I0129 13:38:32.285208 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2231985a-9fb4-4ada-8d50-f35907760eab","Type":"ContainerDied","Data":"e9d435265c2eb8d397b6eca29e5c7936a7d76f638d7ae2f619c6266bf81fb41a"}
Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.011530 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-b47q7"]
Jan 29 13:38:34 crc kubenswrapper[4787]: E0129 13:38:34.012479 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91b2f9a1-513a-4fe0-8319-daf196d2afd8" containerName="mariadb-account-create-update"
Jan 29 13:38:34 crc
kubenswrapper[4787]: I0129 13:38:34.012496 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="91b2f9a1-513a-4fe0-8319-daf196d2afd8" containerName="mariadb-account-create-update" Jan 29 13:38:34 crc kubenswrapper[4787]: E0129 13:38:34.012509 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="432d7a0e-772b-4a31-907f-d3c5b9bfe12a" containerName="mariadb-account-create-update" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012517 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="432d7a0e-772b-4a31-907f-d3c5b9bfe12a" containerName="mariadb-account-create-update" Jan 29 13:38:34 crc kubenswrapper[4787]: E0129 13:38:34.012529 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="120db9b0-7739-4e28-ba21-4f8bedf3a8d8" containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012536 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="120db9b0-7739-4e28-ba21-4f8bedf3a8d8" containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: E0129 13:38:34.012561 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5955ac52-7d5b-4d18-95c2-c733b868af76" containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012569 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5955ac52-7d5b-4d18-95c2-c733b868af76" containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: E0129 13:38:34.012580 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="558f12be-14a0-43bb-9f88-98f6ddafa81f" containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012587 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="558f12be-14a0-43bb-9f88-98f6ddafa81f" containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: E0129 13:38:34.012605 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7df2fd4-19d7-4610-ad5a-9738f142c562" containerName="mariadb-account-create-update" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012612 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7df2fd4-19d7-4610-ad5a-9738f142c562" containerName="mariadb-account-create-update" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012812 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="120db9b0-7739-4e28-ba21-4f8bedf3a8d8" containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012843 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7df2fd4-19d7-4610-ad5a-9738f142c562" containerName="mariadb-account-create-update" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012863 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="558f12be-14a0-43bb-9f88-98f6ddafa81f" containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012872 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="432d7a0e-772b-4a31-907f-d3c5b9bfe12a" containerName="mariadb-account-create-update" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012885 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="91b2f9a1-513a-4fe0-8319-daf196d2afd8" containerName="mariadb-account-create-update" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.012893 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5955ac52-7d5b-4d18-95c2-c733b868af76" 
containerName="mariadb-database-create" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.013635 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.016699 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.016785 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-wbn2g" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.019220 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.021642 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-b47q7"] Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.123678 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-config-data\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.124161 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.124343 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-scripts\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.124444 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26nrn\" (UniqueName: \"kubernetes.io/projected/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-kube-api-access-26nrn\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.226529 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-scripts\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.226602 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26nrn\" (UniqueName: \"kubernetes.io/projected/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-kube-api-access-26nrn\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.226680 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-config-data\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.226746 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.245750 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.246059 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-config-data\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.250532 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26nrn\" (UniqueName: \"kubernetes.io/projected/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-kube-api-access-26nrn\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.290131 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-scripts\") pod \"nova-cell0-conductor-db-sync-b47q7\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") " pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.311452 4787 generic.go:334] "Generic (PLEG): container finished" podID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerID="e7a3e3f5e79aa2de71737f592e1a4e16b7041628ae25cf8906f5f5ab0a696d5d" exitCode=0 Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.311531 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a1d921a8-744d-46fd-b3be-7e79be2532b5","Type":"ContainerDied","Data":"e7a3e3f5e79aa2de71737f592e1a4e16b7041628ae25cf8906f5f5ab0a696d5d"} Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.334150 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-b47q7" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.481900 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.531803 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-scripts\") pod \"a1d921a8-744d-46fd-b3be-7e79be2532b5\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.531863 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-logs\") pod \"a1d921a8-744d-46fd-b3be-7e79be2532b5\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.531889 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"a1d921a8-744d-46fd-b3be-7e79be2532b5\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.531928 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-httpd-run\") pod \"a1d921a8-744d-46fd-b3be-7e79be2532b5\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.531943 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-public-tls-certs\") pod \"a1d921a8-744d-46fd-b3be-7e79be2532b5\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.531984 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-combined-ca-bundle\") pod \"a1d921a8-744d-46fd-b3be-7e79be2532b5\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.532019 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-config-data\") pod \"a1d921a8-744d-46fd-b3be-7e79be2532b5\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.532040 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkjwd\" (UniqueName: \"kubernetes.io/projected/a1d921a8-744d-46fd-b3be-7e79be2532b5-kube-api-access-bkjwd\") pod \"a1d921a8-744d-46fd-b3be-7e79be2532b5\" (UID: \"a1d921a8-744d-46fd-b3be-7e79be2532b5\") " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.532845 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a1d921a8-744d-46fd-b3be-7e79be2532b5" (UID: "a1d921a8-744d-46fd-b3be-7e79be2532b5"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.533091 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-logs" (OuterVolumeSpecName: "logs") pod "a1d921a8-744d-46fd-b3be-7e79be2532b5" (UID: "a1d921a8-744d-46fd-b3be-7e79be2532b5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.538750 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "a1d921a8-744d-46fd-b3be-7e79be2532b5" (UID: "a1d921a8-744d-46fd-b3be-7e79be2532b5"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.545123 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1d921a8-744d-46fd-b3be-7e79be2532b5-kube-api-access-bkjwd" (OuterVolumeSpecName: "kube-api-access-bkjwd") pod "a1d921a8-744d-46fd-b3be-7e79be2532b5" (UID: "a1d921a8-744d-46fd-b3be-7e79be2532b5"). InnerVolumeSpecName "kube-api-access-bkjwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.549677 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-scripts" (OuterVolumeSpecName: "scripts") pod "a1d921a8-744d-46fd-b3be-7e79be2532b5" (UID: "a1d921a8-744d-46fd-b3be-7e79be2532b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.599825 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a1d921a8-744d-46fd-b3be-7e79be2532b5" (UID: "a1d921a8-744d-46fd-b3be-7e79be2532b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.611688 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-config-data" (OuterVolumeSpecName: "config-data") pod "a1d921a8-744d-46fd-b3be-7e79be2532b5" (UID: "a1d921a8-744d-46fd-b3be-7e79be2532b5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.634921 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.634948 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.634977 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.634988 4787 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a1d921a8-744d-46fd-b3be-7e79be2532b5-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.634998 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.635007 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.635018 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkjwd\" (UniqueName: \"kubernetes.io/projected/a1d921a8-744d-46fd-b3be-7e79be2532b5-kube-api-access-bkjwd\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.635911 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a1d921a8-744d-46fd-b3be-7e79be2532b5" (UID: "a1d921a8-744d-46fd-b3be-7e79be2532b5"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.657217 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.736796 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.736826 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a1d921a8-744d-46fd-b3be-7e79be2532b5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:34 crc kubenswrapper[4787]: I0129 13:38:34.874223 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-b47q7"] Jan 29 13:38:34 crc kubenswrapper[4787]: W0129 13:38:34.877507 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0db5cf09_a05d_4ed0_b8b2_d84bad018d43.slice/crio-6c8a94ab95c0d42972b39acb799e6f3ea291088ea7314f96bf99eb56a64fda59 WatchSource:0}: Error finding container 6c8a94ab95c0d42972b39acb799e6f3ea291088ea7314f96bf99eb56a64fda59: Status 404 returned error can't find the container with id 6c8a94ab95c0d42972b39acb799e6f3ea291088ea7314f96bf99eb56a64fda59 Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.323722 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-b47q7" event={"ID":"0db5cf09-a05d-4ed0-b8b2-d84bad018d43","Type":"ContainerStarted","Data":"6c8a94ab95c0d42972b39acb799e6f3ea291088ea7314f96bf99eb56a64fda59"} Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.326113 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.326144 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a1d921a8-744d-46fd-b3be-7e79be2532b5","Type":"ContainerDied","Data":"0193a862b13611e39081a3479b67d8a812951dc343d22cfae974ac0a670af57e"} Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.326201 4787 scope.go:117] "RemoveContainer" containerID="e7a3e3f5e79aa2de71737f592e1a4e16b7041628ae25cf8906f5f5ab0a696d5d" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.333006 4787 generic.go:334] "Generic (PLEG): container finished" podID="2231985a-9fb4-4ada-8d50-f35907760eab" containerID="e1b53c5eb86cba39eede1437beceb0141e4f598736c4bedefcaa3544df888e01" exitCode=0 Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.333074 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2231985a-9fb4-4ada-8d50-f35907760eab","Type":"ContainerDied","Data":"e1b53c5eb86cba39eede1437beceb0141e4f598736c4bedefcaa3544df888e01"} Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.356870 4787 scope.go:117] "RemoveContainer" containerID="be8fd3f509d83fcdfe98d0989d9c77781b4706d3e3e97d9679f5768c5b68ac7d" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.384393 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.411524 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.425283 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:38:35 crc kubenswrapper[4787]: E0129 13:38:35.426001 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerName="glance-log" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.426046 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerName="glance-log" Jan 29 13:38:35 crc kubenswrapper[4787]: E0129 13:38:35.426090 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerName="glance-httpd" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.426099 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerName="glance-httpd" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.426479 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerName="glance-log" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.426547 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" containerName="glance-httpd" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.428327 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.431051 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.431260 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.435783 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.548163 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.550185 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-scripts\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.550230 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.550281 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5q4f\" (UniqueName: \"kubernetes.io/projected/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-kube-api-access-x5q4f\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.550306 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.550581 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-logs\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.550628 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.550780 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " 
pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.550829 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-config-data\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.652151 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-internal-tls-certs\") pod \"2231985a-9fb4-4ada-8d50-f35907760eab\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.652555 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-scripts\") pod \"2231985a-9fb4-4ada-8d50-f35907760eab\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.652648 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-combined-ca-bundle\") pod \"2231985a-9fb4-4ada-8d50-f35907760eab\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.652706 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6clr9\" (UniqueName: \"kubernetes.io/projected/2231985a-9fb4-4ada-8d50-f35907760eab-kube-api-access-6clr9\") pod \"2231985a-9fb4-4ada-8d50-f35907760eab\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.652764 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-logs\") pod \"2231985a-9fb4-4ada-8d50-f35907760eab\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.652792 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-httpd-run\") pod \"2231985a-9fb4-4ada-8d50-f35907760eab\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.652890 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-config-data\") pod \"2231985a-9fb4-4ada-8d50-f35907760eab\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.652918 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"2231985a-9fb4-4ada-8d50-f35907760eab\" (UID: \"2231985a-9fb4-4ada-8d50-f35907760eab\") " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653195 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-logs" (OuterVolumeSpecName: "logs") pod "2231985a-9fb4-4ada-8d50-f35907760eab" (UID: "2231985a-9fb4-4ada-8d50-f35907760eab"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653227 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-logs\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653263 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653283 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2231985a-9fb4-4ada-8d50-f35907760eab" (UID: "2231985a-9fb4-4ada-8d50-f35907760eab"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653331 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653363 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-config-data\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653495 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-scripts\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653526 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653569 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5q4f\" (UniqueName: \"kubernetes.io/projected/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-kube-api-access-x5q4f\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653613 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " 
pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653673 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653687 4787 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2231985a-9fb4-4ada-8d50-f35907760eab-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653853 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-logs\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.653951 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.657888 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.659786 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "2231985a-9fb4-4ada-8d50-f35907760eab" (UID: "2231985a-9fb4-4ada-8d50-f35907760eab"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.660950 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2231985a-9fb4-4ada-8d50-f35907760eab-kube-api-access-6clr9" (OuterVolumeSpecName: "kube-api-access-6clr9") pod "2231985a-9fb4-4ada-8d50-f35907760eab" (UID: "2231985a-9fb4-4ada-8d50-f35907760eab"). InnerVolumeSpecName "kube-api-access-6clr9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.662082 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-config-data\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.662573 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.662632 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-scripts\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.668552 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-scripts" (OuterVolumeSpecName: "scripts") pod "2231985a-9fb4-4ada-8d50-f35907760eab" (UID: "2231985a-9fb4-4ada-8d50-f35907760eab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.673179 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.679844 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5q4f\" (UniqueName: \"kubernetes.io/projected/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-kube-api-access-x5q4f\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.685162 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") " pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.725634 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2231985a-9fb4-4ada-8d50-f35907760eab" (UID: "2231985a-9fb4-4ada-8d50-f35907760eab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.749952 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2231985a-9fb4-4ada-8d50-f35907760eab" (UID: "2231985a-9fb4-4ada-8d50-f35907760eab"). 
InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.754844 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.754870 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.754881 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.754890 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.754899 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6clr9\" (UniqueName: \"kubernetes.io/projected/2231985a-9fb4-4ada-8d50-f35907760eab-kube-api-access-6clr9\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.759915 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.770677 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-config-data" (OuterVolumeSpecName: "config-data") pod "2231985a-9fb4-4ada-8d50-f35907760eab" (UID: "2231985a-9fb4-4ada-8d50-f35907760eab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.783365 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.857064 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2231985a-9fb4-4ada-8d50-f35907760eab-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.857100 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:38:35 crc kubenswrapper[4787]: I0129 13:38:35.997576 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1d921a8-744d-46fd-b3be-7e79be2532b5" path="/var/lib/kubelet/pods/a1d921a8-744d-46fd-b3be-7e79be2532b5/volumes" Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.309722 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:38:36 crc kubenswrapper[4787]: W0129 13:38:36.326589 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67675e2f_3a2b_4552_bbd5_c12b3ba3a505.slice/crio-510292bf20b18a58473d8b589500d70c5a7346b2e80c96773be02224365a7c29 WatchSource:0}: Error finding container 510292bf20b18a58473d8b589500d70c5a7346b2e80c96773be02224365a7c29: Status 404 returned error can't find the container with id 510292bf20b18a58473d8b589500d70c5a7346b2e80c96773be02224365a7c29 Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.348173 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67675e2f-3a2b-4552-bbd5-c12b3ba3a505","Type":"ContainerStarted","Data":"510292bf20b18a58473d8b589500d70c5a7346b2e80c96773be02224365a7c29"} Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.351899 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2231985a-9fb4-4ada-8d50-f35907760eab","Type":"ContainerDied","Data":"b07080e69232535ee2ff05a09fcc8ab3454b86b0e813890800eeae565d27f262"} Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.351945 4787 scope.go:117] "RemoveContainer" containerID="e1b53c5eb86cba39eede1437beceb0141e4f598736c4bedefcaa3544df888e01" Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.352067 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.390686 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.416307 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.416361 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 29 13:38:36 crc kubenswrapper[4787]: E0129 13:38:36.416681 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" containerName="glance-log" Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.416772 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" containerName="glance-log" Jan 29 13:38:36 crc kubenswrapper[4787]: E0129 13:38:36.416793 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" containerName="glance-httpd" Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.416800 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" containerName="glance-httpd" Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.419342 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" containerName="glance-httpd" Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.419383 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" containerName="glance-log" Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.420414 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.427724 4787 scope.go:117] "RemoveContainer" containerID="e9d435265c2eb8d397b6eca29e5c7936a7d76f638d7ae2f619c6266bf81fb41a"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.428509 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.428655 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.450059 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.571627 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.571732 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.571817 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.571842 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.571862 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.571885 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.571953 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2ffs\" (UniqueName: \"kubernetes.io/projected/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-kube-api-access-z2ffs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.572102 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-logs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.673705 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.673789 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.673865 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.673889 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.673912 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.673940 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.674006 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2ffs\" (UniqueName: \"kubernetes.io/projected/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-kube-api-access-z2ffs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.674055 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-logs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.674366 4787 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.674570 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-logs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.674854 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.686688 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.686906 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.687795 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.697895 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.707410 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2ffs\" (UniqueName: \"kubernetes.io/projected/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-kube-api-access-z2ffs\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.740611 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-internal-api-0\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") " pod="openstack/glance-default-internal-api-0"
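The block above is the kubelet's two-phase volume setup for glance-default-internal-api-0: the reconciler first records VerifyControllerAttachedVolume for every volume, then starts MountVolume; the local PV is staged once at its device mount path /mnt/openstack/pv11 (MountVolume.MountDevice) before the per-pod SetUp bind mount, while secrets, projected tokens, and empty-dirs go straight to SetUp. A minimal sketch for auditing that flow offline (not part of this CI job; it just scans a kubelet log from stdin for one pod UID taken from the entries above):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
)

func main() {
	const uid = "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" // glance-default-internal-api-0, per the entries above
	// Matches e.g.: operationExecutor.MountVolume started for volume \"config-data\"
	re := regexp.MustCompile(`([A-Za-z]+(?:\.[A-Za-z]+)+) (started|succeeded) for volume \\"([^\\"]+)\\"`)
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // kubelet entries can exceed the default 64 KiB token limit
	for sc.Scan() {
		line := sc.Text()
		if !strings.Contains(line, uid) {
			continue
		}
		for _, m := range re.FindAllStringSubmatch(line, -1) {
			fmt.Printf("%-55s %-9s %s\n", m[1], m[2], m[3])
		}
	}
}

Run as `go run volgrep.go < kubelet.log`; each volume should show a started line followed by a succeeded line, with MountVolume.MountDevice appearing only for the local-storage PV.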
Jan 29 13:38:36 crc kubenswrapper[4787]: I0129 13:38:36.789093 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:37 crc kubenswrapper[4787]: I0129 13:38:37.336620 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:38:37 crc kubenswrapper[4787]: W0129 13:38:37.358653 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b19d6d2_57d4_4c0b_aa0d_7184ea42da0a.slice/crio-27c09118a058b498c230143715c2f7817a1c8016898e129c0a6aa65e33d45728 WatchSource:0}: Error finding container 27c09118a058b498c230143715c2f7817a1c8016898e129c0a6aa65e33d45728: Status 404 returned error can't find the container with id 27c09118a058b498c230143715c2f7817a1c8016898e129c0a6aa65e33d45728
Jan 29 13:38:37 crc kubenswrapper[4787]: I0129 13:38:37.370222 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67675e2f-3a2b-4552-bbd5-c12b3ba3a505","Type":"ContainerStarted","Data":"2f63dfa9d7bf21cc31d7b9d8c380ef9fbae854b8b7d202f4e7c02c9ab75414bb"}
Jan 29 13:38:38 crc kubenswrapper[4787]: I0129 13:38:38.003556 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2231985a-9fb4-4ada-8d50-f35907760eab" path="/var/lib/kubelet/pods/2231985a-9fb4-4ada-8d50-f35907760eab/volumes"
Jan 29 13:38:38 crc kubenswrapper[4787]: I0129 13:38:38.385213 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a","Type":"ContainerStarted","Data":"431d19c70bb7fc09c0cea13cec37421e6b58778d78ec5b6bf958f731b6ca0476"}
Jan 29 13:38:38 crc kubenswrapper[4787]: I0129 13:38:38.385719 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a","Type":"ContainerStarted","Data":"27c09118a058b498c230143715c2f7817a1c8016898e129c0a6aa65e33d45728"}
Jan 29 13:38:38 crc kubenswrapper[4787]: I0129 13:38:38.390642 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67675e2f-3a2b-4552-bbd5-c12b3ba3a505","Type":"ContainerStarted","Data":"e1a61b54bf10478ca80351b015706e154501c6dbfad962662f03a6e51dfe02bb"}
Jan 29 13:38:38 crc kubenswrapper[4787]: I0129 13:38:38.424575 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.424546651 podStartE2EDuration="3.424546651s" podCreationTimestamp="2026-01-29 13:38:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:38:38.408708831 +0000 UTC m=+1357.169969097" watchObservedRunningTime="2026-01-29 13:38:38.424546651 +0000 UTC m=+1357.185806937"
Jan 29 13:38:39 crc kubenswrapper[4787]: I0129 13:38:39.403882 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a","Type":"ContainerStarted","Data":"a8b8d261a49e47ed22a0cd5c563cf4143e2a75230a47767988c41d719d54d742"}
Jan 29 13:38:39 crc kubenswrapper[4787]: I0129 13:38:39.438808 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.438791709 podStartE2EDuration="3.438791709s" podCreationTimestamp="2026-01-29 13:38:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:38:39.425632528 +0000 UTC m=+1358.186892804" watchObservedRunningTime="2026-01-29 13:38:39.438791709 +0000 UTC m=+1358.200051985"
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.466985 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerStarted","Data":"854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825"}
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.467908 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.467371 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="sg-core" containerID="cri-o://b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652" gracePeriod=30
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.467341 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="ceilometer-notification-agent" containerID="cri-o://32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828" gracePeriod=30
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.467498 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="proxy-httpd" containerID="cri-o://854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825" gracePeriod=30
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.467680 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="ceilometer-central-agent" containerID="cri-o://505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698" gracePeriod=30
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.470141 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-b47q7" event={"ID":"0db5cf09-a05d-4ed0-b8b2-d84bad018d43","Type":"ContainerStarted","Data":"eb129b091c246c6b3efe6a68ff640e44a192a5d03b945968aaf8ecf282346312"}
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.506210 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.482809951 podStartE2EDuration="21.506184959s" podCreationTimestamp="2026-01-29 13:38:23 +0000 UTC" firstStartedPulling="2026-01-29 13:38:24.479951248 +0000 UTC m=+1343.241211524" lastFinishedPulling="2026-01-29 13:38:43.503326256 +0000 UTC m=+1362.264586532" observedRunningTime="2026-01-29 13:38:44.491861258 +0000 UTC m=+1363.253121574" watchObservedRunningTime="2026-01-29 13:38:44.506184959 +0000 UTC m=+1363.267445245"
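The pod_startup_latency_tracker entries above carry two durations: podStartE2EDuration spans pod creation to observed running, while podStartSLOduration excludes image-pull time (for ceilometer-0, 2.48s vs 21.5s, because roughly 19s went to pulling; the glance pods show zero-value pull timestamps, so no pull was needed and the two durations match). A small sketch that tabulates these entries from a kubelet log on stdin, assuming only the message format visible above:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`Observed pod startup duration" pod="([^"]+)" podStartSLOduration=([0-9.]+) podStartE2EDuration="([^"]+)"`)
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // allow long kubelet lines
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("%-55s slo=%ss e2e=%s\n", m[1], m[2], m[3])
		}
	}
}

A large gap between the two columns points at slow image pulls rather than slow container startup.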
Jan 29 13:38:44 crc kubenswrapper[4787]: I0129 13:38:44.521061 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-b47q7" podStartSLOduration=2.961697831 podStartE2EDuration="11.521034884s" podCreationTimestamp="2026-01-29 13:38:33 +0000 UTC" firstStartedPulling="2026-01-29 13:38:34.879541234 +0000 UTC m=+1353.640801500" lastFinishedPulling="2026-01-29 13:38:43.438878277 +0000 UTC m=+1362.200138553" observedRunningTime="2026-01-29 13:38:44.508194391 +0000 UTC m=+1363.269454707" watchObservedRunningTime="2026-01-29 13:38:44.521034884 +0000 UTC m=+1363.282295160"
Jan 29 13:38:44 crc kubenswrapper[4787]: E0129 13:38:44.641447 4787 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5a3f598_761b_4f31_b44d_276f53f9ff54.slice/crio-b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5a3f598_761b_4f31_b44d_276f53f9ff54.slice/crio-854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825.scope\": RecentStats: unable to find data in memory cache]"
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.491679 4787 generic.go:334] "Generic (PLEG): container finished" podID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerID="854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825" exitCode=0
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.492110 4787 generic.go:334] "Generic (PLEG): container finished" podID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerID="b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652" exitCode=2
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.492128 4787 generic.go:334] "Generic (PLEG): container finished" podID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerID="505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698" exitCode=0
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.492527 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerDied","Data":"854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825"}
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.492590 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerDied","Data":"b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652"}
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.492602 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerDied","Data":"505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698"}
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.761110 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.761179 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.813398 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 29 13:38:45 crc kubenswrapper[4787]: I0129 13:38:45.815304 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 29 13:38:46 crc kubenswrapper[4787]: I0129 13:38:46.505204 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 29 13:38:46 crc kubenswrapper[4787]: I0129 13:38:46.505690 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 29 13:38:46 crc kubenswrapper[4787]: I0129 13:38:46.790412 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:46 crc kubenswrapper[4787]: I0129 13:38:46.790485 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:46 crc kubenswrapper[4787]: I0129 13:38:46.840187 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:46 crc kubenswrapper[4787]: I0129 13:38:46.844830 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:47 crc kubenswrapper[4787]: I0129 13:38:47.516191 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:47 crc kubenswrapper[4787]: I0129 13:38:47.516274 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.146226 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.265081 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.266196 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-config-data\") pod \"f5a3f598-761b-4f31-b44d-276f53f9ff54\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") "
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.266309 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-log-httpd\") pod \"f5a3f598-761b-4f31-b44d-276f53f9ff54\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") "
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.266339 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-run-httpd\") pod \"f5a3f598-761b-4f31-b44d-276f53f9ff54\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") "
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.266467 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-sg-core-conf-yaml\") pod \"f5a3f598-761b-4f31-b44d-276f53f9ff54\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") "
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.266547 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-combined-ca-bundle\") pod \"f5a3f598-761b-4f31-b44d-276f53f9ff54\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") "
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.266574 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-scripts\") pod \"f5a3f598-761b-4f31-b44d-276f53f9ff54\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") "
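The probe lines above trace the usual gating sequence for both glance pods: the startup probe reports unhealthy while the API warms up, flips to started, and only then do readiness probes begin (status="" is the initial unknown state before the first result; the doubled lines come from two probed containers per pod). The actual glance pod spec is not part of this log, so the values below are purely illustrative assumptions, using the real k8s.io/api types that such a probe pair would be built from:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Hypothetical path/port/thresholds; the glance-default-* spec is owned by the operator.
	startup := corev1.Probe{
		ProbeHandler:     corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{Path: "/healthcheck", Port: intstr.FromInt(9292)}},
		PeriodSeconds:    3,
		FailureThreshold: 10, // logs probe="startup" status="unhealthy" until the first success
	}
	readiness := corev1.Probe{
		ProbeHandler:  corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{Path: "/healthcheck", Port: intstr.FromInt(9292)}},
		PeriodSeconds: 5, // readiness results only start flowing once startup has succeeded
	}
	fmt.Println(startup.FailureThreshold, readiness.PeriodSeconds)
}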
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.266647 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgc2q\" (UniqueName: \"kubernetes.io/projected/f5a3f598-761b-4f31-b44d-276f53f9ff54-kube-api-access-zgc2q\") pod \"f5a3f598-761b-4f31-b44d-276f53f9ff54\" (UID: \"f5a3f598-761b-4f31-b44d-276f53f9ff54\") "
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.268151 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f5a3f598-761b-4f31-b44d-276f53f9ff54" (UID: "f5a3f598-761b-4f31-b44d-276f53f9ff54"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.268558 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f5a3f598-761b-4f31-b44d-276f53f9ff54" (UID: "f5a3f598-761b-4f31-b44d-276f53f9ff54"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.276351 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-scripts" (OuterVolumeSpecName: "scripts") pod "f5a3f598-761b-4f31-b44d-276f53f9ff54" (UID: "f5a3f598-761b-4f31-b44d-276f53f9ff54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.276395 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5a3f598-761b-4f31-b44d-276f53f9ff54-kube-api-access-zgc2q" (OuterVolumeSpecName: "kube-api-access-zgc2q") pod "f5a3f598-761b-4f31-b44d-276f53f9ff54" (UID: "f5a3f598-761b-4f31-b44d-276f53f9ff54"). InnerVolumeSpecName "kube-api-access-zgc2q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.339678 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f5a3f598-761b-4f31-b44d-276f53f9ff54" (UID: "f5a3f598-761b-4f31-b44d-276f53f9ff54"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.368398 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.368440 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgc2q\" (UniqueName: \"kubernetes.io/projected/f5a3f598-761b-4f31-b44d-276f53f9ff54-kube-api-access-zgc2q\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.368469 4787 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.368482 4787 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5a3f598-761b-4f31-b44d-276f53f9ff54-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.368495 4787 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.381754 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5a3f598-761b-4f31-b44d-276f53f9ff54" (UID: "f5a3f598-761b-4f31-b44d-276f53f9ff54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.395090 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.400534 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-config-data" (OuterVolumeSpecName: "config-data") pod "f5a3f598-761b-4f31-b44d-276f53f9ff54" (UID: "f5a3f598-761b-4f31-b44d-276f53f9ff54"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.470439 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.470915 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5a3f598-761b-4f31-b44d-276f53f9ff54-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.545831 4787 generic.go:334] "Generic (PLEG): container finished" podID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerID="32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828" exitCode=0
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.545945 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.545989 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerDied","Data":"32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828"}
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.546034 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5a3f598-761b-4f31-b44d-276f53f9ff54","Type":"ContainerDied","Data":"953605f575348a803f01562a714c2df429420fa6fa1047723136f1c079fec937"}
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.546051 4787 scope.go:117] "RemoveContainer" containerID="854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.591597 4787 scope.go:117] "RemoveContainer" containerID="b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.591736 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.614671 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.622654 4787 scope.go:117] "RemoveContainer" containerID="32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.631220 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 29 13:38:48 crc kubenswrapper[4787]: E0129 13:38:48.631689 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="ceilometer-notification-agent"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.631704 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="ceilometer-notification-agent"
Jan 29 13:38:48 crc kubenswrapper[4787]: E0129 13:38:48.631718 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="ceilometer-central-agent"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.631724 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="ceilometer-central-agent"
Jan 29 13:38:48 crc kubenswrapper[4787]: E0129 13:38:48.631743 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="sg-core"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.631751 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="sg-core"
Jan 29 13:38:48 crc kubenswrapper[4787]: E0129 13:38:48.631758 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="proxy-httpd"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.631764 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="proxy-httpd"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.632151 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="ceilometer-notification-agent"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.632167 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="proxy-httpd"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.632188 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="sg-core"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.632198 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" containerName="ceilometer-central-agent"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.634187 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.637054 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.638795 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.651642 4787 scope.go:117] "RemoveContainer" containerID="505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.661499 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.688867 4787 scope.go:117] "RemoveContainer" containerID="854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825"
Jan 29 13:38:48 crc kubenswrapper[4787]: E0129 13:38:48.689381 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825\": container with ID starting with 854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825 not found: ID does not exist" containerID="854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.689418 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825"} err="failed to get container status \"854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825\": rpc error: code = NotFound desc = could not find container \"854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825\": container with ID starting with 854a7d6990c39f1e3c2bd20ab2f455f453331adf1c524836548d4b12436c3825 not found: ID does not exist"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.689441 4787 scope.go:117] "RemoveContainer" containerID="b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652"
Jan 29 13:38:48 crc kubenswrapper[4787]: E0129 13:38:48.689714 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652\": container with ID starting with b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652 not found: ID does not exist" containerID="b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.689742 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652"} err="failed to get container status \"b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652\": rpc error: code = NotFound desc = could not find container \"b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652\": container with ID starting with b2baa24bc7c99d3755d65385eccd4de1e7d6a77fb56f603eeba8a3277b956652 not found: ID does not exist"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.689759 4787 scope.go:117] "RemoveContainer" containerID="32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828"
Jan 29 13:38:48 crc kubenswrapper[4787]: E0129 13:38:48.690016 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828\": container with ID starting with 32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828 not found: ID does not exist" containerID="32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.690043 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828"} err="failed to get container status \"32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828\": rpc error: code = NotFound desc = could not find container \"32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828\": container with ID starting with 32379a8f98806d6be12ef73d1c01acea8d7eface29f0b714c3e8b37768715828 not found: ID does not exist"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.690058 4787 scope.go:117] "RemoveContainer" containerID="505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698"
Jan 29 13:38:48 crc kubenswrapper[4787]: E0129 13:38:48.690327 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698\": container with ID starting with 505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698 not found: ID does not exist" containerID="505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.690352 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698"} err="failed to get container status \"505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698\": rpc error: code = NotFound desc = could not find container \"505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698\": container with ID starting with 505d986af107b1cd5de2af42ce74cf42edc7e90fc180f05f51e446725d229698 not found: ID does not exist"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.788302 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-run-httpd\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.788402 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-scripts\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.788429 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.788466 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgsrw\" (UniqueName: \"kubernetes.io/projected/1d17ccc0-6485-41ea-b5db-3f693e8359bf-kube-api-access-vgsrw\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.788481 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-config-data\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.788670 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.788915 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-log-httpd\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.890915 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-log-httpd\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.891004 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-run-httpd\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.891077 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-scripts\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.891103 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.891137 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgsrw\" (UniqueName: \"kubernetes.io/projected/1d17ccc0-6485-41ea-b5db-3f693e8359bf-kube-api-access-vgsrw\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.891160 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-config-data\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.891213 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.891525 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-log-httpd\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.891859 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-run-httpd\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.895927 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.896624 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-config-data\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.896771 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.906086 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-scripts\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.916245 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgsrw\" (UniqueName: \"kubernetes.io/projected/1d17ccc0-6485-41ea-b5db-3f693e8359bf-kube-api-access-vgsrw\") pod \"ceilometer-0\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " pod="openstack/ceilometer-0"
Jan 29 13:38:48 crc kubenswrapper[4787]: I0129 13:38:48.965630 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 13:38:49 crc kubenswrapper[4787]: I0129 13:38:49.412279 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 13:38:49 crc kubenswrapper[4787]: I0129 13:38:49.445414 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:49 crc kubenswrapper[4787]: I0129 13:38:49.556776 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerStarted","Data":"696cfafad4a9a0b43a426b77468644400e5ee8f027ddcfd692782bcaa1ed9eec"}
Jan 29 13:38:49 crc kubenswrapper[4787]: I0129 13:38:49.558137 4787 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 29 13:38:49 crc kubenswrapper[4787]: I0129 13:38:49.595299 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 29 13:38:50 crc kubenswrapper[4787]: I0129 13:38:50.000012 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5a3f598-761b-4f31-b44d-276f53f9ff54" path="/var/lib/kubelet/pods/f5a3f598-761b-4f31-b44d-276f53f9ff54/volumes"
Jan 29 13:38:50 crc kubenswrapper[4787]: I0129 13:38:50.568805 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerStarted","Data":"93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e"}
Jan 29 13:38:50 crc kubenswrapper[4787]: I0129 13:38:50.768448 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 29 13:38:51 crc kubenswrapper[4787]: I0129 13:38:51.579911 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerStarted","Data":"7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1"}
Jan 29 13:38:53 crc kubenswrapper[4787]: I0129 13:38:53.608549 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerStarted","Data":"56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6"}
Jan 29 13:38:54 crc kubenswrapper[4787]: I0129 13:38:54.625156 4787 generic.go:334] "Generic (PLEG): container finished" podID="0db5cf09-a05d-4ed0-b8b2-d84bad018d43" containerID="eb129b091c246c6b3efe6a68ff640e44a192a5d03b945968aaf8ecf282346312" exitCode=0
Jan 29 13:38:54 crc kubenswrapper[4787]: I0129 13:38:54.625999 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-b47q7" event={"ID":"0db5cf09-a05d-4ed0-b8b2-d84bad018d43","Type":"ContainerDied","Data":"eb129b091c246c6b3efe6a68ff640e44a192a5d03b945968aaf8ecf282346312"}
Jan 29 13:38:55 crc kubenswrapper[4787]: I0129 13:38:55.638996 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerStarted","Data":"deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7"}
Jan 29 13:38:55 crc kubenswrapper[4787]: I0129 13:38:55.639194 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="ceilometer-central-agent" containerID="cri-o://93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e" gracePeriod=30
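Each "Killing container with a grace period" line above asks the runtime to stop the container with gracePeriod=30: SIGTERM first, escalating to SIGKILL if the process outlives the grace window. The later "container finished" lines report exitCode=0 for the agents that shut down cleanly and exitCode=2 for sg-core, which suggests it exits non-zero on SIGTERM rather than being force-killed. A standalone sketch of the same semantics using plain os/exec (this is the shape of the behavior, not the CRI-O implementation):

package main

import (
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace sends SIGTERM, then SIGKILL if the process is still alive
// when the grace period expires - the same contract as gracePeriod=30 above.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	_ = cmd.Process.Signal(syscall.SIGTERM)
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period; err carries the exit code
	case <-time.After(grace):
		return cmd.Process.Kill() // hard stop, like the kubelet's SIGKILL escalation
	}
}

func main() {
	cmd := exec.Command("sleep", "300")
	_ = cmd.Start()
	_ = killWithGrace(cmd, 30*time.Second)
}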
Jan 29 13:38:55 crc kubenswrapper[4787]: I0129 13:38:55.639445 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="proxy-httpd" containerID="cri-o://deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7" gracePeriod=30
Jan 29 13:38:55 crc kubenswrapper[4787]: I0129 13:38:55.639451 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="sg-core" containerID="cri-o://56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6" gracePeriod=30
Jan 29 13:38:55 crc kubenswrapper[4787]: I0129 13:38:55.639512 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="ceilometer-notification-agent" containerID="cri-o://7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1" gracePeriod=30
Jan 29 13:38:55 crc kubenswrapper[4787]: I0129 13:38:55.674929 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.130957378 podStartE2EDuration="7.674909726s" podCreationTimestamp="2026-01-29 13:38:48 +0000 UTC" firstStartedPulling="2026-01-29 13:38:49.42027414 +0000 UTC m=+1368.181534416" lastFinishedPulling="2026-01-29 13:38:54.964226488 +0000 UTC m=+1373.725486764" observedRunningTime="2026-01-29 13:38:55.669849035 +0000 UTC m=+1374.431109321" watchObservedRunningTime="2026-01-29 13:38:55.674909726 +0000 UTC m=+1374.436170012"
Jan 29 13:38:55 crc kubenswrapper[4787]: I0129 13:38:55.929547 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-b47q7"
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.021492 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-scripts\") pod \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") "
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.021615 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-config-data\") pod \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") "
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.022556 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26nrn\" (UniqueName: \"kubernetes.io/projected/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-kube-api-access-26nrn\") pod \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") "
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.022588 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-combined-ca-bundle\") pod \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\" (UID: \"0db5cf09-a05d-4ed0-b8b2-d84bad018d43\") "
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.027225 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-kube-api-access-26nrn" (OuterVolumeSpecName: "kube-api-access-26nrn") pod "0db5cf09-a05d-4ed0-b8b2-d84bad018d43" (UID: "0db5cf09-a05d-4ed0-b8b2-d84bad018d43"). InnerVolumeSpecName "kube-api-access-26nrn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.028878 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-scripts" (OuterVolumeSpecName: "scripts") pod "0db5cf09-a05d-4ed0-b8b2-d84bad018d43" (UID: "0db5cf09-a05d-4ed0-b8b2-d84bad018d43"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.052495 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0db5cf09-a05d-4ed0-b8b2-d84bad018d43" (UID: "0db5cf09-a05d-4ed0-b8b2-d84bad018d43"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.056147 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-config-data" (OuterVolumeSpecName: "config-data") pod "0db5cf09-a05d-4ed0-b8b2-d84bad018d43" (UID: "0db5cf09-a05d-4ed0-b8b2-d84bad018d43"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.125011 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.125189 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26nrn\" (UniqueName: \"kubernetes.io/projected/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-kube-api-access-26nrn\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.125267 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.125322 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0db5cf09-a05d-4ed0-b8b2-d84bad018d43-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.655110 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-b47q7" event={"ID":"0db5cf09-a05d-4ed0-b8b2-d84bad018d43","Type":"ContainerDied","Data":"6c8a94ab95c0d42972b39acb799e6f3ea291088ea7314f96bf99eb56a64fda59"}
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.655158 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c8a94ab95c0d42972b39acb799e6f3ea291088ea7314f96bf99eb56a64fda59"
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.655199 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-b47q7"
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.659276 4787 generic.go:334] "Generic (PLEG): container finished" podID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerID="deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7" exitCode=0
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.659317 4787 generic.go:334] "Generic (PLEG): container finished" podID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerID="56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6" exitCode=2
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.659329 4787 generic.go:334] "Generic (PLEG): container finished" podID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerID="7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1" exitCode=0
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.659353 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerDied","Data":"deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7"}
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.659384 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerDied","Data":"56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6"}
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.659399 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerDied","Data":"7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1"}
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.762139 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 29 13:38:56 crc kubenswrapper[4787]: E0129 13:38:56.762493 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db5cf09-a05d-4ed0-b8b2-d84bad018d43" containerName="nova-cell0-conductor-db-sync"
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.762505 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db5cf09-a05d-4ed0-b8b2-d84bad018d43" containerName="nova-cell0-conductor-db-sync"
Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.762683 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="0db5cf09-a05d-4ed0-b8b2-d84bad018d43" containerName="nova-cell0-conductor-db-sync"
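When nova-cell0-conductor-0 is admitted, cpu_manager and memory_manager first drop stale per-container state left by the finished db-sync pod (the RemoveStaleState / "Deleted CPUSet assignment" lines; logged at error level but harmless housekeeping). The "container finished" lines carry the exit codes that matter for triage; a quick pass that surfaces only the non-zero exits from a log on stdin, assuming the generic.go message format above:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`container finished" podID="([^"]+)" containerID="([0-9a-f]+)" exitCode=([0-9]+)`)
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil && m[3] != "0" {
			// In this section only sg-core trips this, with exitCode=2.
			fmt.Printf("pod=%s container=%.12s exitCode=%s\n", m[1], m[2], m[3])
		}
	}
}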
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.765574 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.767328 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-wbn2g" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.788975 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.836387 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.836533 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.836793 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zn9w\" (UniqueName: \"kubernetes.io/projected/52053f33-608f-4f1e-9432-baece90d08fb-kube-api-access-8zn9w\") pod \"nova-cell0-conductor-0\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.938941 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.939587 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.939796 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zn9w\" (UniqueName: \"kubernetes.io/projected/52053f33-608f-4f1e-9432-baece90d08fb-kube-api-access-8zn9w\") pod \"nova-cell0-conductor-0\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.945625 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.951097 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:56 crc kubenswrapper[4787]: I0129 13:38:56.957896 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zn9w\" (UniqueName: \"kubernetes.io/projected/52053f33-608f-4f1e-9432-baece90d08fb-kube-api-access-8zn9w\") pod \"nova-cell0-conductor-0\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:57 crc kubenswrapper[4787]: I0129 13:38:57.082032 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 13:38:57 crc kubenswrapper[4787]: I0129 13:38:57.593483 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:38:57 crc kubenswrapper[4787]: W0129 13:38:57.602889 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52053f33_608f_4f1e_9432_baece90d08fb.slice/crio-d96a1798aec1e4f527b2f3cfd3ec91125a39e1f9e6326a7749f987b8d96dc8a4 WatchSource:0}: Error finding container d96a1798aec1e4f527b2f3cfd3ec91125a39e1f9e6326a7749f987b8d96dc8a4: Status 404 returned error can't find the container with id d96a1798aec1e4f527b2f3cfd3ec91125a39e1f9e6326a7749f987b8d96dc8a4 Jan 29 13:38:57 crc kubenswrapper[4787]: I0129 13:38:57.675655 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"52053f33-608f-4f1e-9432-baece90d08fb","Type":"ContainerStarted","Data":"d96a1798aec1e4f527b2f3cfd3ec91125a39e1f9e6326a7749f987b8d96dc8a4"} Jan 29 13:39:00 crc kubenswrapper[4787]: I0129 13:39:00.742965 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"52053f33-608f-4f1e-9432-baece90d08fb","Type":"ContainerStarted","Data":"2b304ab8c2c786d238ef41d0439f6a9dcc42e20c02b9de41cde375bfb43bf8a1"} Jan 29 13:39:00 crc kubenswrapper[4787]: I0129 13:39:00.743781 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.729485 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.763905 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=6.763880604 podStartE2EDuration="6.763880604s" podCreationTimestamp="2026-01-29 13:38:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:00.767026187 +0000 UTC m=+1379.528286483" watchObservedRunningTime="2026-01-29 13:39:02.763880604 +0000 UTC m=+1381.525140890" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.769924 4787 generic.go:334] "Generic (PLEG): container finished" podID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerID="93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e" exitCode=0 Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.769963 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerDied","Data":"93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e"} Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.769989 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d17ccc0-6485-41ea-b5db-3f693e8359bf","Type":"ContainerDied","Data":"696cfafad4a9a0b43a426b77468644400e5ee8f027ddcfd692782bcaa1ed9eec"} Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.770009 4787 scope.go:117] "RemoveContainer" containerID="deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.770186 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.779097 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-config-data\") pod \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.779193 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-combined-ca-bundle\") pod \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.779233 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgsrw\" (UniqueName: \"kubernetes.io/projected/1d17ccc0-6485-41ea-b5db-3f693e8359bf-kube-api-access-vgsrw\") pod \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.779377 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-sg-core-conf-yaml\") pod \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.779444 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-scripts\") pod \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\" (UID: 
\"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.779487 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-run-httpd\") pod \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.779507 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-log-httpd\") pod \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\" (UID: \"1d17ccc0-6485-41ea-b5db-3f693e8359bf\") " Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.780207 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1d17ccc0-6485-41ea-b5db-3f693e8359bf" (UID: "1d17ccc0-6485-41ea-b5db-3f693e8359bf"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.780314 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1d17ccc0-6485-41ea-b5db-3f693e8359bf" (UID: "1d17ccc0-6485-41ea-b5db-3f693e8359bf"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.785618 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d17ccc0-6485-41ea-b5db-3f693e8359bf-kube-api-access-vgsrw" (OuterVolumeSpecName: "kube-api-access-vgsrw") pod "1d17ccc0-6485-41ea-b5db-3f693e8359bf" (UID: "1d17ccc0-6485-41ea-b5db-3f693e8359bf"). InnerVolumeSpecName "kube-api-access-vgsrw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.787195 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-scripts" (OuterVolumeSpecName: "scripts") pod "1d17ccc0-6485-41ea-b5db-3f693e8359bf" (UID: "1d17ccc0-6485-41ea-b5db-3f693e8359bf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.815609 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1d17ccc0-6485-41ea-b5db-3f693e8359bf" (UID: "1d17ccc0-6485-41ea-b5db-3f693e8359bf"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.869156 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d17ccc0-6485-41ea-b5db-3f693e8359bf" (UID: "1d17ccc0-6485-41ea-b5db-3f693e8359bf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.881611 4787 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.881645 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.881662 4787 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.881673 4787 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d17ccc0-6485-41ea-b5db-3f693e8359bf-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.881686 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.881696 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgsrw\" (UniqueName: \"kubernetes.io/projected/1d17ccc0-6485-41ea-b5db-3f693e8359bf-kube-api-access-vgsrw\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.885678 4787 scope.go:117] "RemoveContainer" containerID="56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.893498 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-config-data" (OuterVolumeSpecName: "config-data") pod "1d17ccc0-6485-41ea-b5db-3f693e8359bf" (UID: "1d17ccc0-6485-41ea-b5db-3f693e8359bf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.900787 4787 scope.go:117] "RemoveContainer" containerID="7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.917236 4787 scope.go:117] "RemoveContainer" containerID="93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.933195 4787 scope.go:117] "RemoveContainer" containerID="deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7" Jan 29 13:39:02 crc kubenswrapper[4787]: E0129 13:39:02.933557 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7\": container with ID starting with deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7 not found: ID does not exist" containerID="deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.933600 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7"} err="failed to get container status \"deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7\": rpc error: code = NotFound desc = could not find container \"deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7\": container with ID starting with deda09e444f586edc9b906fe9c1c74b8fbc99656cda4d47ad9a237fdd25f7da7 not found: ID does not exist" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.933622 4787 scope.go:117] "RemoveContainer" containerID="56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6" Jan 29 13:39:02 crc kubenswrapper[4787]: E0129 13:39:02.933889 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6\": container with ID starting with 56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6 not found: ID does not exist" containerID="56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.933910 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6"} err="failed to get container status \"56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6\": rpc error: code = NotFound desc = could not find container \"56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6\": container with ID starting with 56efb7461fe17b442b4e1811eacb1575f9cddd7a75a5024a0e977d6262ac34a6 not found: ID does not exist" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.933942 4787 scope.go:117] "RemoveContainer" containerID="7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1" Jan 29 13:39:02 crc kubenswrapper[4787]: E0129 13:39:02.934147 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1\": container with ID starting with 7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1 not found: ID does not exist" containerID="7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1" Jan 29 13:39:02 crc 
kubenswrapper[4787]: I0129 13:39:02.934196 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1"} err="failed to get container status \"7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1\": rpc error: code = NotFound desc = could not find container \"7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1\": container with ID starting with 7079824f032867c71bd0c437a22a18c5be36864a44e37efc61d9c4c7eb0252f1 not found: ID does not exist" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.934216 4787 scope.go:117] "RemoveContainer" containerID="93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e" Jan 29 13:39:02 crc kubenswrapper[4787]: E0129 13:39:02.934433 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e\": container with ID starting with 93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e not found: ID does not exist" containerID="93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.934470 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e"} err="failed to get container status \"93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e\": rpc error: code = NotFound desc = could not find container \"93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e\": container with ID starting with 93c66f44f21945fb3b87f4f9b716749ff8e64470f0a7b9370e1026b16450457e not found: ID does not exist" Jan 29 13:39:02 crc kubenswrapper[4787]: I0129 13:39:02.982888 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d17ccc0-6485-41ea-b5db-3f693e8359bf-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.154830 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.181618 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.192131 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:03 crc kubenswrapper[4787]: E0129 13:39:03.192504 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="ceilometer-notification-agent" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.192522 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="ceilometer-notification-agent" Jan 29 13:39:03 crc kubenswrapper[4787]: E0129 13:39:03.192535 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="sg-core" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.192542 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="sg-core" Jan 29 13:39:03 crc kubenswrapper[4787]: E0129 13:39:03.192551 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="proxy-httpd" Jan 29 13:39:03 crc 
kubenswrapper[4787]: I0129 13:39:03.192557 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="proxy-httpd" Jan 29 13:39:03 crc kubenswrapper[4787]: E0129 13:39:03.192586 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="ceilometer-central-agent" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.192592 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="ceilometer-central-agent" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.192735 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="ceilometer-notification-agent" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.192752 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="ceilometer-central-agent" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.192764 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="sg-core" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.192777 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" containerName="proxy-httpd" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.194361 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.196369 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.196620 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.201867 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.288802 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.288870 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x8jd\" (UniqueName: \"kubernetes.io/projected/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-kube-api-access-9x8jd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.288962 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-config-data\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.289052 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-log-httpd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" 
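[annotation] The paired "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" lines above are benign: the ceilometer containers were already gone by the time the cleanup path asked about them. Removal must be idempotent, which with a gRPC runtime means treating codes.NotFound as success. A hedged sketch (the runtime interface and helper are illustrative, not kubelet's actual code):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// runtime abstracts the two CRI calls involved; a real client would
// wrap the generated gRPC stubs.
type runtime interface {
	ContainerStatus(id string) error
	RemoveContainer(id string) error
}

// removeContainer deletes a container but treats "already gone" as
// success, which is why the lines above are logged and then ignored.
func removeContainer(rt runtime, id string) error {
	// Status is queried first; NotFound here already means there is
	// nothing left to remove.
	if err := rt.ContainerStatus(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil
		}
		return fmt.Errorf("getting status of %s: %w", id, err)
	}
	if err := rt.RemoveContainer(id); err != nil && status.Code(err) != codes.NotFound {
		return fmt.Errorf("removing container %s: %w", id, err)
	}
	return nil
}

// gone simulates a runtime that has already forgotten the container.
type gone struct{}

func (gone) ContainerStatus(string) error { return status.Error(codes.NotFound, "no such container") }
func (gone) RemoveContainer(string) error { return status.Error(codes.NotFound, "no such container") }

func main() {
	err := removeContainer(gone{}, "deda09e444f5")
	fmt.Println("removal error:", err) // <nil>: NotFound is absorbed
}
```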
Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.289147 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.289200 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-scripts\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.289291 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-run-httpd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.391542 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.391590 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x8jd\" (UniqueName: \"kubernetes.io/projected/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-kube-api-access-9x8jd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.391627 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-config-data\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.391676 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-log-httpd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.391716 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.391758 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-scripts\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.391801 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-run-httpd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " 
pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.392377 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-run-httpd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.392575 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-log-httpd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.396916 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.397173 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-config-data\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.397190 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.400147 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-scripts\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.426776 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x8jd\" (UniqueName: \"kubernetes.io/projected/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-kube-api-access-9x8jd\") pod \"ceilometer-0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.509287 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:03 crc kubenswrapper[4787]: W0129 13:39:03.996227 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7bcf9827_eefc_44fe_9a1d_237c95b9dbd0.slice/crio-973f3d6d15dbabea927220affdcb15d7d42bf3c4f7fee78d8aba4c695eeda94c WatchSource:0}: Error finding container 973f3d6d15dbabea927220affdcb15d7d42bf3c4f7fee78d8aba4c695eeda94c: Status 404 returned error can't find the container with id 973f3d6d15dbabea927220affdcb15d7d42bf3c4f7fee78d8aba4c695eeda94c Jan 29 13:39:03 crc kubenswrapper[4787]: I0129 13:39:03.999037 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d17ccc0-6485-41ea-b5db-3f693e8359bf" path="/var/lib/kubelet/pods/1d17ccc0-6485-41ea-b5db-3f693e8359bf/volumes" Jan 29 13:39:04 crc kubenswrapper[4787]: I0129 13:39:04.000500 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:04 crc kubenswrapper[4787]: I0129 13:39:04.792519 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerStarted","Data":"973f3d6d15dbabea927220affdcb15d7d42bf3c4f7fee78d8aba4c695eeda94c"} Jan 29 13:39:05 crc kubenswrapper[4787]: I0129 13:39:05.812283 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerStarted","Data":"f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55"} Jan 29 13:39:05 crc kubenswrapper[4787]: I0129 13:39:05.812579 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerStarted","Data":"920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859"} Jan 29 13:39:06 crc kubenswrapper[4787]: I0129 13:39:06.822165 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerStarted","Data":"ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee"} Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.111896 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.595655 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-nvhcw"] Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.597377 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.601960 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.602241 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.615988 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-nvhcw"] Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.673149 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-scripts\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.673194 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-config-data\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.673214 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k94md\" (UniqueName: \"kubernetes.io/projected/3c0e8878-777a-4637-906a-c23cd622a9ee-kube-api-access-k94md\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.673318 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.729660 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.731328 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.735448 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.749096 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.774594 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-scripts\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.774635 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-config-data\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.774659 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k94md\" (UniqueName: \"kubernetes.io/projected/3c0e8878-777a-4637-906a-c23cd622a9ee-kube-api-access-k94md\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.774719 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7hq5\" (UniqueName: \"kubernetes.io/projected/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-kube-api-access-w7hq5\") pod \"nova-scheduler-0\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.774754 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.774778 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.774861 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-config-data\") pod \"nova-scheduler-0\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.793322 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-config-data\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.814064 4787 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.817922 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-scripts\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.819016 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k94md\" (UniqueName: \"kubernetes.io/projected/3c0e8878-777a-4637-906a-c23cd622a9ee-kube-api-access-k94md\") pod \"nova-cell0-cell-mapping-nvhcw\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.843525 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.845099 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.860468 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.879754 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.881351 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-config-data\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.881421 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7hq5\" (UniqueName: \"kubernetes.io/projected/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-kube-api-access-w7hq5\") pod \"nova-scheduler-0\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.881464 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.881480 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stnnk\" (UniqueName: \"kubernetes.io/projected/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-kube-api-access-stnnk\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.881526 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: 
\"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.881603 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-logs\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.881619 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-config-data\") pod \"nova-scheduler-0\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.900264 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-config-data\") pod \"nova-scheduler-0\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.901762 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.920052 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7hq5\" (UniqueName: \"kubernetes.io/projected/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-kube-api-access-w7hq5\") pod \"nova-scheduler-0\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.922177 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.984059 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-config-data\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.984141 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.984175 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stnnk\" (UniqueName: \"kubernetes.io/projected/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-kube-api-access-stnnk\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.984281 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-logs\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:07 crc kubenswrapper[4787]: I0129 13:39:07.984819 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-logs\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.000360 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-config-data\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.019246 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-qk4t2"] Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.021169 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.031180 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stnnk\" (UniqueName: \"kubernetes.io/projected/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-kube-api-access-stnnk\") pod \"nova-metadata-0\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " pod="openstack/nova-metadata-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.032735 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.049122 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.066852 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.067297 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.069133 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.097889 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.110854 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.111075 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.111199 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.111270 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-config\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.111438 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6sh5\" (UniqueName: \"kubernetes.io/projected/617d1f09-2a16-4006-9f10-a71a24c67f98-kube-api-access-w6sh5\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.106310 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-qk4t2"] Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.112402 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.122988 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.206621 4787 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.208213 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.222528 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.225976 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.248777 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.248826 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.248911 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.248959 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.248976 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-config\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.248998 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-logs\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.249070 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.249107 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6sh5\" (UniqueName: \"kubernetes.io/projected/617d1f09-2a16-4006-9f10-a71a24c67f98-kube-api-access-w6sh5\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: 
\"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.249150 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-config-data\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.249216 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfztq\" (UniqueName: \"kubernetes.io/projected/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-kube-api-access-kfztq\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.250071 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-nb\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.250234 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-sb\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.250793 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-swift-storage-0\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.253325 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-config\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.253845 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-svc\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.279678 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6sh5\" (UniqueName: \"kubernetes.io/projected/617d1f09-2a16-4006-9f10-a71a24c67f98-kube-api-access-w6sh5\") pod \"dnsmasq-dns-5bfb54f9b5-qk4t2\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.351943 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 
13:39:08.352281 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfztq\" (UniqueName: \"kubernetes.io/projected/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-kube-api-access-kfztq\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.352387 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjrh2\" (UniqueName: \"kubernetes.io/projected/9881f277-909c-4aaa-b3a0-97abadeb2ccf-kube-api-access-hjrh2\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.352433 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-logs\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.352503 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.352543 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.354819 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-config-data\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.367257 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-logs\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.369613 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-config-data\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.385089 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.389977 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfztq\" (UniqueName: \"kubernetes.io/projected/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-kube-api-access-kfztq\") pod \"nova-api-0\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " pod="openstack/nova-api-0" Jan 29 
13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.432752 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.456143 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjrh2\" (UniqueName: \"kubernetes.io/projected/9881f277-909c-4aaa-b3a0-97abadeb2ccf-kube-api-access-hjrh2\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.456240 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.456264 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.465894 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.475989 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.476820 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjrh2\" (UniqueName: \"kubernetes.io/projected/9881f277-909c-4aaa-b3a0-97abadeb2ccf-kube-api-access-hjrh2\") pod \"nova-cell1-novncproxy-0\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.499200 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.592533 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.702702 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-nvhcw"] Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.845641 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.905417 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerStarted","Data":"f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7"} Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.908165 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.912745 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-nvhcw" event={"ID":"3c0e8878-777a-4637-906a-c23cd622a9ee","Type":"ContainerStarted","Data":"ef6817e5d3101342391d06a8f3b98c03c29c7799e14d4c1f162fad0bee06b76a"} Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.940223 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.601318351 podStartE2EDuration="5.940203474s" podCreationTimestamp="2026-01-29 13:39:03 +0000 UTC" firstStartedPulling="2026-01-29 13:39:03.999244326 +0000 UTC m=+1382.760504612" lastFinishedPulling="2026-01-29 13:39:08.338129459 +0000 UTC m=+1387.099389735" observedRunningTime="2026-01-29 13:39:08.93219319 +0000 UTC m=+1387.693453466" watchObservedRunningTime="2026-01-29 13:39:08.940203474 +0000 UTC m=+1387.701463750" Jan 29 13:39:08 crc kubenswrapper[4787]: I0129 13:39:08.970582 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:08 crc kubenswrapper[4787]: W0129 13:39:08.971035 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6affeb5_7b7c_450a_ae9b_e1288b44acd2.slice/crio-d3cb9ffc310f6526ea721f472048c197c287487f007f39b8a026837c7b928631 WatchSource:0}: Error finding container d3cb9ffc310f6526ea721f472048c197c287487f007f39b8a026837c7b928631: Status 404 returned error can't find the container with id d3cb9ffc310f6526ea721f472048c197c287487f007f39b8a026837c7b928631 Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.082846 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-qk4t2"] Jan 29 13:39:09 crc kubenswrapper[4787]: W0129 13:39:09.087172 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod617d1f09_2a16_4006_9f10_a71a24c67f98.slice/crio-0c195c9d24eb7ba0f4ed68517e69ca99afb5d85c6f9e25432ad49fa2bbbb4dd3 WatchSource:0}: Error finding container 0c195c9d24eb7ba0f4ed68517e69ca99afb5d85c6f9e25432ad49fa2bbbb4dd3: Status 404 returned error can't find the container with id 0c195c9d24eb7ba0f4ed68517e69ca99afb5d85c6f9e25432ad49fa2bbbb4dd3 Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.144821 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vnpkm"] Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.146036 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.151969 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.153021 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.154872 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vnpkm"] Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.211090 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:09 crc kubenswrapper[4787]: W0129 13:39:09.215909 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2cf63d5_d5ba_46e9_ae91_046bc6f8e268.slice/crio-bcb423f1e21d386ecb316e56e22d1751c9d7ea62a57b5acca7f4fb2d9d365969 WatchSource:0}: Error finding container bcb423f1e21d386ecb316e56e22d1751c9d7ea62a57b5acca7f4fb2d9d365969: Status 404 returned error can't find the container with id bcb423f1e21d386ecb316e56e22d1751c9d7ea62a57b5acca7f4fb2d9d365969 Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.284230 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-config-data\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.284485 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dbvs\" (UniqueName: \"kubernetes.io/projected/91ee13c4-b950-4f37-8601-f05ab94d65f7-kube-api-access-9dbvs\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.284533 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-scripts\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.284578 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.304270 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:39:09 crc kubenswrapper[4787]: W0129 13:39:09.313763 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9881f277_909c_4aaa_b3a0_97abadeb2ccf.slice/crio-49677c9884e42089fdd61071a4dd379ea9266eebb8f1d112c5be7db25d538705 WatchSource:0}: Error finding container 49677c9884e42089fdd61071a4dd379ea9266eebb8f1d112c5be7db25d538705: Status 404 returned 
error can't find the container with id 49677c9884e42089fdd61071a4dd379ea9266eebb8f1d112c5be7db25d538705 Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.386651 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-config-data\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.386791 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dbvs\" (UniqueName: \"kubernetes.io/projected/91ee13c4-b950-4f37-8601-f05ab94d65f7-kube-api-access-9dbvs\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.387144 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-scripts\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.387171 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.390995 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.391059 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-config-data\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.394900 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-scripts\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.409504 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dbvs\" (UniqueName: \"kubernetes.io/projected/91ee13c4-b950-4f37-8601-f05ab94d65f7-kube-api-access-9dbvs\") pod \"nova-cell1-conductor-db-sync-vnpkm\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.501563 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.940024 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9881f277-909c-4aaa-b3a0-97abadeb2ccf","Type":"ContainerStarted","Data":"49677c9884e42089fdd61071a4dd379ea9266eebb8f1d112c5be7db25d538705"} Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.941730 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a6affeb5-7b7c-450a-ae9b-e1288b44acd2","Type":"ContainerStarted","Data":"d3cb9ffc310f6526ea721f472048c197c287487f007f39b8a026837c7b928631"} Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.943649 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-nvhcw" event={"ID":"3c0e8878-777a-4637-906a-c23cd622a9ee","Type":"ContainerStarted","Data":"5c0e1b31ffd86bab49198138d4e0ac050ebf0c35b80508d01ec92940afd2ce53"} Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.948884 4787 generic.go:334] "Generic (PLEG): container finished" podID="617d1f09-2a16-4006-9f10-a71a24c67f98" containerID="8b2100bbd0e7b597c6ba63187bc4b2902d0babe05931444cae6a5be05bb52020" exitCode=0 Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.949103 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" event={"ID":"617d1f09-2a16-4006-9f10-a71a24c67f98","Type":"ContainerDied","Data":"8b2100bbd0e7b597c6ba63187bc4b2902d0babe05931444cae6a5be05bb52020"} Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.949145 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" event={"ID":"617d1f09-2a16-4006-9f10-a71a24c67f98","Type":"ContainerStarted","Data":"0c195c9d24eb7ba0f4ed68517e69ca99afb5d85c6f9e25432ad49fa2bbbb4dd3"} Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.955749 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"04b17fb3-37cd-4ea5-adb3-8f7c94158e44","Type":"ContainerStarted","Data":"0e070230963b018e4fc0c8c982ccbba7c854a6a9b163b15a09bbfef744bfc6d9"} Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.974521 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268","Type":"ContainerStarted","Data":"bcb423f1e21d386ecb316e56e22d1751c9d7ea62a57b5acca7f4fb2d9d365969"} Jan 29 13:39:09 crc kubenswrapper[4787]: I0129 13:39:09.980172 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-nvhcw" podStartSLOduration=2.980153306 podStartE2EDuration="2.980153306s" podCreationTimestamp="2026-01-29 13:39:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:09.975004861 +0000 UTC m=+1388.736265147" watchObservedRunningTime="2026-01-29 13:39:09.980153306 +0000 UTC m=+1388.741413572" Jan 29 13:39:10 crc kubenswrapper[4787]: I0129 13:39:10.044743 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vnpkm"] Jan 29 13:39:10 crc kubenswrapper[4787]: I0129 13:39:10.983491 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" event={"ID":"91ee13c4-b950-4f37-8601-f05ab94d65f7","Type":"ContainerStarted","Data":"3ab098aaa3619a2e900d7cada3a9a1250c0529a3012322f8c741285c7f65ce68"} Jan 
29 13:39:10 crc kubenswrapper[4787]: I0129 13:39:10.983803 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" event={"ID":"91ee13c4-b950-4f37-8601-f05ab94d65f7","Type":"ContainerStarted","Data":"c6037047934a34f6412d81e5593327c467bbc1c5126fb9218bd1fe0e81eb879f"} Jan 29 13:39:10 crc kubenswrapper[4787]: I0129 13:39:10.993871 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" event={"ID":"617d1f09-2a16-4006-9f10-a71a24c67f98","Type":"ContainerStarted","Data":"d6cd7686c6206a0fcda3a3bcfcb0c1008e467ae48af3659840efcbec80fefe41"} Jan 29 13:39:11 crc kubenswrapper[4787]: I0129 13:39:11.000960 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" podStartSLOduration=2.000944411 podStartE2EDuration="2.000944411s" podCreationTimestamp="2026-01-29 13:39:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:10.996742933 +0000 UTC m=+1389.758003209" watchObservedRunningTime="2026-01-29 13:39:11.000944411 +0000 UTC m=+1389.762204697" Jan 29 13:39:11 crc kubenswrapper[4787]: I0129 13:39:11.036918 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" podStartSLOduration=4.03690091 podStartE2EDuration="4.03690091s" podCreationTimestamp="2026-01-29 13:39:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:11.029170393 +0000 UTC m=+1389.790430669" watchObservedRunningTime="2026-01-29 13:39:11.03690091 +0000 UTC m=+1389.798161186" Jan 29 13:39:11 crc kubenswrapper[4787]: I0129 13:39:11.615054 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:39:11 crc kubenswrapper[4787]: I0129 13:39:11.631363 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:12 crc kubenswrapper[4787]: I0129 13:39:12.017432 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.039027 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9881f277-909c-4aaa-b3a0-97abadeb2ccf","Type":"ContainerStarted","Data":"a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6"} Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.039106 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="9881f277-909c-4aaa-b3a0-97abadeb2ccf" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6" gracePeriod=30 Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.042163 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a6affeb5-7b7c-450a-ae9b-e1288b44acd2","Type":"ContainerStarted","Data":"83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2"} Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.045598 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"04b17fb3-37cd-4ea5-adb3-8f7c94158e44","Type":"ContainerStarted","Data":"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973"} 
Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.045641 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"04b17fb3-37cd-4ea5-adb3-8f7c94158e44","Type":"ContainerStarted","Data":"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac"} Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.045692 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerName="nova-metadata-log" containerID="cri-o://c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac" gracePeriod=30 Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.045782 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerName="nova-metadata-metadata" containerID="cri-o://fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973" gracePeriod=30 Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.048181 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268","Type":"ContainerStarted","Data":"f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629"} Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.048215 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268","Type":"ContainerStarted","Data":"986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f"} Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.075297 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.488895532 podStartE2EDuration="6.075278271s" podCreationTimestamp="2026-01-29 13:39:08 +0000 UTC" firstStartedPulling="2026-01-29 13:39:09.317632715 +0000 UTC m=+1388.078892991" lastFinishedPulling="2026-01-29 13:39:12.904015454 +0000 UTC m=+1391.665275730" observedRunningTime="2026-01-29 13:39:14.062895423 +0000 UTC m=+1392.824155739" watchObservedRunningTime="2026-01-29 13:39:14.075278271 +0000 UTC m=+1392.836538547" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.094438 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.410854972 podStartE2EDuration="7.094421288s" podCreationTimestamp="2026-01-29 13:39:07 +0000 UTC" firstStartedPulling="2026-01-29 13:39:09.217966059 +0000 UTC m=+1387.979226325" lastFinishedPulling="2026-01-29 13:39:12.901532365 +0000 UTC m=+1391.662792641" observedRunningTime="2026-01-29 13:39:14.082691939 +0000 UTC m=+1392.843952245" watchObservedRunningTime="2026-01-29 13:39:14.094421288 +0000 UTC m=+1392.855681574" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.110591 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.181980289 podStartE2EDuration="7.11053966s" podCreationTimestamp="2026-01-29 13:39:07 +0000 UTC" firstStartedPulling="2026-01-29 13:39:08.974338752 +0000 UTC m=+1387.735599028" lastFinishedPulling="2026-01-29 13:39:12.902898113 +0000 UTC m=+1391.664158399" observedRunningTime="2026-01-29 13:39:14.104414748 +0000 UTC m=+1392.865675054" watchObservedRunningTime="2026-01-29 13:39:14.11053966 +0000 UTC m=+1392.871799936" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.133121 4787 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.128333194 podStartE2EDuration="7.133106813s" podCreationTimestamp="2026-01-29 13:39:07 +0000 UTC" firstStartedPulling="2026-01-29 13:39:08.901764586 +0000 UTC m=+1387.663024862" lastFinishedPulling="2026-01-29 13:39:12.906538205 +0000 UTC m=+1391.667798481" observedRunningTime="2026-01-29 13:39:14.12657418 +0000 UTC m=+1392.887834466" watchObservedRunningTime="2026-01-29 13:39:14.133106813 +0000 UTC m=+1392.894367079" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.628986 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.698253 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-combined-ca-bundle\") pod \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.698401 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stnnk\" (UniqueName: \"kubernetes.io/projected/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-kube-api-access-stnnk\") pod \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.698433 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-logs\") pod \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.698575 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-config-data\") pod \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\" (UID: \"04b17fb3-37cd-4ea5-adb3-8f7c94158e44\") " Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.700131 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-logs" (OuterVolumeSpecName: "logs") pod "04b17fb3-37cd-4ea5-adb3-8f7c94158e44" (UID: "04b17fb3-37cd-4ea5-adb3-8f7c94158e44"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.703857 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-kube-api-access-stnnk" (OuterVolumeSpecName: "kube-api-access-stnnk") pod "04b17fb3-37cd-4ea5-adb3-8f7c94158e44" (UID: "04b17fb3-37cd-4ea5-adb3-8f7c94158e44"). InnerVolumeSpecName "kube-api-access-stnnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.729741 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-config-data" (OuterVolumeSpecName: "config-data") pod "04b17fb3-37cd-4ea5-adb3-8f7c94158e44" (UID: "04b17fb3-37cd-4ea5-adb3-8f7c94158e44"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.742332 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "04b17fb3-37cd-4ea5-adb3-8f7c94158e44" (UID: "04b17fb3-37cd-4ea5-adb3-8f7c94158e44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.801581 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.801644 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stnnk\" (UniqueName: \"kubernetes.io/projected/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-kube-api-access-stnnk\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.801674 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:14 crc kubenswrapper[4787]: I0129 13:39:14.801697 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04b17fb3-37cd-4ea5-adb3-8f7c94158e44-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.062958 4787 generic.go:334] "Generic (PLEG): container finished" podID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerID="fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973" exitCode=0 Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.062996 4787 generic.go:334] "Generic (PLEG): container finished" podID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerID="c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac" exitCode=143 Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.063011 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.063039 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"04b17fb3-37cd-4ea5-adb3-8f7c94158e44","Type":"ContainerDied","Data":"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973"} Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.063098 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"04b17fb3-37cd-4ea5-adb3-8f7c94158e44","Type":"ContainerDied","Data":"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac"} Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.063110 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"04b17fb3-37cd-4ea5-adb3-8f7c94158e44","Type":"ContainerDied","Data":"0e070230963b018e4fc0c8c982ccbba7c854a6a9b163b15a09bbfef744bfc6d9"} Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.063129 4787 scope.go:117] "RemoveContainer" containerID="fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.089426 4787 scope.go:117] "RemoveContainer" containerID="c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.102686 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.126345 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.128563 4787 scope.go:117] "RemoveContainer" containerID="fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973" Jan 29 13:39:15 crc kubenswrapper[4787]: E0129 13:39:15.130292 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973\": container with ID starting with fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973 not found: ID does not exist" containerID="fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.130325 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973"} err="failed to get container status \"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973\": rpc error: code = NotFound desc = could not find container \"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973\": container with ID starting with fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973 not found: ID does not exist" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.130366 4787 scope.go:117] "RemoveContainer" containerID="c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac" Jan 29 13:39:15 crc kubenswrapper[4787]: E0129 13:39:15.131001 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac\": container with ID starting with c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac not found: ID does not exist" containerID="c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 
13:39:15.131050 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac"} err="failed to get container status \"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac\": rpc error: code = NotFound desc = could not find container \"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac\": container with ID starting with c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac not found: ID does not exist" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.131084 4787 scope.go:117] "RemoveContainer" containerID="fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.131443 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973"} err="failed to get container status \"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973\": rpc error: code = NotFound desc = could not find container \"fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973\": container with ID starting with fbe6bcbd6345f41a1cc4b0b958c6abe2df65657a64e0a1f47f63f4b4ca2c1973 not found: ID does not exist" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.131486 4787 scope.go:117] "RemoveContainer" containerID="c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.132730 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac"} err="failed to get container status \"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac\": rpc error: code = NotFound desc = could not find container \"c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac\": container with ID starting with c9a5dc9d03c50f7bce08e81ba0c51310907b587246303d259d39256491f17fac not found: ID does not exist" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.136846 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:15 crc kubenswrapper[4787]: E0129 13:39:15.137323 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerName="nova-metadata-metadata" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.137340 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerName="nova-metadata-metadata" Jan 29 13:39:15 crc kubenswrapper[4787]: E0129 13:39:15.137359 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerName="nova-metadata-log" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.137366 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerName="nova-metadata-log" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.137540 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerName="nova-metadata-metadata" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.137564 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" containerName="nova-metadata-log" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.138502 4787 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.140834 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.140905 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.173863 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.209716 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvt86\" (UniqueName: \"kubernetes.io/projected/6ef01160-2717-4bf8-bf48-f938fc1393bb-kube-api-access-rvt86\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.209781 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.209972 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef01160-2717-4bf8-bf48-f938fc1393bb-logs\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.210176 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-config-data\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.210362 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.311894 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.311960 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef01160-2717-4bf8-bf48-f938fc1393bb-logs\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.312027 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-config-data\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " 
pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.312083 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.312109 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvt86\" (UniqueName: \"kubernetes.io/projected/6ef01160-2717-4bf8-bf48-f938fc1393bb-kube-api-access-rvt86\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.312488 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef01160-2717-4bf8-bf48-f938fc1393bb-logs\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.316599 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.318071 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-config-data\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.320690 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.328880 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvt86\" (UniqueName: \"kubernetes.io/projected/6ef01160-2717-4bf8-bf48-f938fc1393bb-kube-api-access-rvt86\") pod \"nova-metadata-0\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.457643 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.954987 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:15 crc kubenswrapper[4787]: I0129 13:39:15.997571 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04b17fb3-37cd-4ea5-adb3-8f7c94158e44" path="/var/lib/kubelet/pods/04b17fb3-37cd-4ea5-adb3-8f7c94158e44/volumes" Jan 29 13:39:16 crc kubenswrapper[4787]: I0129 13:39:16.081428 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ef01160-2717-4bf8-bf48-f938fc1393bb","Type":"ContainerStarted","Data":"613fe5bb9bb956595528dcf585df49b5e4dc8fd0ebc171b27223be8da9526b3e"} Jan 29 13:39:17 crc kubenswrapper[4787]: I0129 13:39:17.097574 4787 generic.go:334] "Generic (PLEG): container finished" podID="91ee13c4-b950-4f37-8601-f05ab94d65f7" containerID="3ab098aaa3619a2e900d7cada3a9a1250c0529a3012322f8c741285c7f65ce68" exitCode=0 Jan 29 13:39:17 crc kubenswrapper[4787]: I0129 13:39:17.097827 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" event={"ID":"91ee13c4-b950-4f37-8601-f05ab94d65f7","Type":"ContainerDied","Data":"3ab098aaa3619a2e900d7cada3a9a1250c0529a3012322f8c741285c7f65ce68"} Jan 29 13:39:17 crc kubenswrapper[4787]: I0129 13:39:17.103325 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ef01160-2717-4bf8-bf48-f938fc1393bb","Type":"ContainerStarted","Data":"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0"} Jan 29 13:39:17 crc kubenswrapper[4787]: I0129 13:39:17.103409 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ef01160-2717-4bf8-bf48-f938fc1393bb","Type":"ContainerStarted","Data":"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963"} Jan 29 13:39:17 crc kubenswrapper[4787]: I0129 13:39:17.155417 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.155399723 podStartE2EDuration="2.155399723s" podCreationTimestamp="2026-01-29 13:39:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:17.146949776 +0000 UTC m=+1395.908210052" watchObservedRunningTime="2026-01-29 13:39:17.155399723 +0000 UTC m=+1395.916659999" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.068059 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.069020 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.118733 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.122950 4787 generic.go:334] "Generic (PLEG): container finished" podID="3c0e8878-777a-4637-906a-c23cd622a9ee" containerID="5c0e1b31ffd86bab49198138d4e0ac050ebf0c35b80508d01ec92940afd2ce53" exitCode=0 Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.123168 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-nvhcw" 
event={"ID":"3c0e8878-777a-4637-906a-c23cd622a9ee","Type":"ContainerDied","Data":"5c0e1b31ffd86bab49198138d4e0ac050ebf0c35b80508d01ec92940afd2ce53"} Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.166425 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.437353 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.500821 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.500912 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.527838 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-6vrjj"] Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.528097 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" podUID="6c925c65-213d-4981-83f5-55a4946c69e0" containerName="dnsmasq-dns" containerID="cri-o://326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60" gracePeriod=10 Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.593091 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.632350 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.679312 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dbvs\" (UniqueName: \"kubernetes.io/projected/91ee13c4-b950-4f37-8601-f05ab94d65f7-kube-api-access-9dbvs\") pod \"91ee13c4-b950-4f37-8601-f05ab94d65f7\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.680545 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-config-data\") pod \"91ee13c4-b950-4f37-8601-f05ab94d65f7\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.680627 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-combined-ca-bundle\") pod \"91ee13c4-b950-4f37-8601-f05ab94d65f7\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.680808 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-scripts\") pod \"91ee13c4-b950-4f37-8601-f05ab94d65f7\" (UID: \"91ee13c4-b950-4f37-8601-f05ab94d65f7\") " Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.695010 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91ee13c4-b950-4f37-8601-f05ab94d65f7-kube-api-access-9dbvs" (OuterVolumeSpecName: "kube-api-access-9dbvs") pod "91ee13c4-b950-4f37-8601-f05ab94d65f7" (UID: "91ee13c4-b950-4f37-8601-f05ab94d65f7"). InnerVolumeSpecName "kube-api-access-9dbvs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.713560 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-scripts" (OuterVolumeSpecName: "scripts") pod "91ee13c4-b950-4f37-8601-f05ab94d65f7" (UID: "91ee13c4-b950-4f37-8601-f05ab94d65f7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.747869 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91ee13c4-b950-4f37-8601-f05ab94d65f7" (UID: "91ee13c4-b950-4f37-8601-f05ab94d65f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.749997 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-config-data" (OuterVolumeSpecName: "config-data") pod "91ee13c4-b950-4f37-8601-f05ab94d65f7" (UID: "91ee13c4-b950-4f37-8601-f05ab94d65f7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.783243 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.783274 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dbvs\" (UniqueName: \"kubernetes.io/projected/91ee13c4-b950-4f37-8601-f05ab94d65f7-kube-api-access-9dbvs\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.783285 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:18 crc kubenswrapper[4787]: I0129 13:39:18.783295 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ee13c4-b950-4f37-8601-f05ab94d65f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.043233 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.103365 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-config\") pod \"6c925c65-213d-4981-83f5-55a4946c69e0\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.103415 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-sb\") pod \"6c925c65-213d-4981-83f5-55a4946c69e0\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.103476 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-nb\") pod \"6c925c65-213d-4981-83f5-55a4946c69e0\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.103517 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-swift-storage-0\") pod \"6c925c65-213d-4981-83f5-55a4946c69e0\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.103560 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmj5x\" (UniqueName: \"kubernetes.io/projected/6c925c65-213d-4981-83f5-55a4946c69e0-kube-api-access-zmj5x\") pod \"6c925c65-213d-4981-83f5-55a4946c69e0\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.103662 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-svc\") pod \"6c925c65-213d-4981-83f5-55a4946c69e0\" (UID: \"6c925c65-213d-4981-83f5-55a4946c69e0\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.108375 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c925c65-213d-4981-83f5-55a4946c69e0-kube-api-access-zmj5x" (OuterVolumeSpecName: "kube-api-access-zmj5x") pod "6c925c65-213d-4981-83f5-55a4946c69e0" (UID: "6c925c65-213d-4981-83f5-55a4946c69e0"). InnerVolumeSpecName "kube-api-access-zmj5x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.138796 4787 generic.go:334] "Generic (PLEG): container finished" podID="6c925c65-213d-4981-83f5-55a4946c69e0" containerID="326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60" exitCode=0 Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.138871 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" event={"ID":"6c925c65-213d-4981-83f5-55a4946c69e0","Type":"ContainerDied","Data":"326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60"} Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.138905 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" event={"ID":"6c925c65-213d-4981-83f5-55a4946c69e0","Type":"ContainerDied","Data":"510c37d761fb6c3798588be00e23478fb83901ad9013a0e8b2f9e553acb170d5"} Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.138926 4787 scope.go:117] "RemoveContainer" containerID="326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.139052 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b4f5fc4f-6vrjj" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.147200 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" event={"ID":"91ee13c4-b950-4f37-8601-f05ab94d65f7","Type":"ContainerDied","Data":"c6037047934a34f6412d81e5593327c467bbc1c5126fb9218bd1fe0e81eb879f"} Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.147223 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6037047934a34f6412d81e5593327c467bbc1c5126fb9218bd1fe0e81eb879f" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.147280 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vnpkm" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.188847 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6c925c65-213d-4981-83f5-55a4946c69e0" (UID: "6c925c65-213d-4981-83f5-55a4946c69e0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.193005 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-config" (OuterVolumeSpecName: "config") pod "6c925c65-213d-4981-83f5-55a4946c69e0" (UID: "6c925c65-213d-4981-83f5-55a4946c69e0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.194396 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6c925c65-213d-4981-83f5-55a4946c69e0" (UID: "6c925c65-213d-4981-83f5-55a4946c69e0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.197932 4787 scope.go:117] "RemoveContainer" containerID="8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.209060 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.209096 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.209107 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmj5x\" (UniqueName: \"kubernetes.io/projected/6c925c65-213d-4981-83f5-55a4946c69e0-kube-api-access-zmj5x\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.209126 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.217855 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:39:19 crc kubenswrapper[4787]: E0129 13:39:19.218723 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c925c65-213d-4981-83f5-55a4946c69e0" containerName="init" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.218748 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c925c65-213d-4981-83f5-55a4946c69e0" containerName="init" Jan 29 13:39:19 crc kubenswrapper[4787]: E0129 13:39:19.218760 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c925c65-213d-4981-83f5-55a4946c69e0" containerName="dnsmasq-dns" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.218769 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c925c65-213d-4981-83f5-55a4946c69e0" containerName="dnsmasq-dns" Jan 29 13:39:19 crc kubenswrapper[4787]: E0129 13:39:19.218793 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ee13c4-b950-4f37-8601-f05ab94d65f7" containerName="nova-cell1-conductor-db-sync" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.218802 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ee13c4-b950-4f37-8601-f05ab94d65f7" containerName="nova-cell1-conductor-db-sync" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.221164 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c925c65-213d-4981-83f5-55a4946c69e0" containerName="dnsmasq-dns" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.221174 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6c925c65-213d-4981-83f5-55a4946c69e0" (UID: "6c925c65-213d-4981-83f5-55a4946c69e0"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.221203 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="91ee13c4-b950-4f37-8601-f05ab94d65f7" containerName="nova-cell1-conductor-db-sync" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.224372 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.227809 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6c925c65-213d-4981-83f5-55a4946c69e0" (UID: "6c925c65-213d-4981-83f5-55a4946c69e0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.228435 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.230708 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.254869 4787 scope.go:117] "RemoveContainer" containerID="326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60" Jan 29 13:39:19 crc kubenswrapper[4787]: E0129 13:39:19.258733 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60\": container with ID starting with 326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60 not found: ID does not exist" containerID="326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.258792 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60"} err="failed to get container status \"326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60\": rpc error: code = NotFound desc = could not find container \"326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60\": container with ID starting with 326a5d6d9034aaa6500cd1674ade847dc52cbe241a70c773479ed92409fd6c60 not found: ID does not exist" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.258827 4787 scope.go:117] "RemoveContainer" containerID="8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718" Jan 29 13:39:19 crc kubenswrapper[4787]: E0129 13:39:19.261279 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718\": container with ID starting with 8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718 not found: ID does not exist" containerID="8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.261308 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718"} err="failed to get container status \"8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718\": rpc error: code = NotFound desc = could not find container 
\"8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718\": container with ID starting with 8fc542a2632f57b34bad8974a68b1fc399dadb1eb2f59b9e4602ac9096f08718 not found: ID does not exist" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.311030 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zd9q\" (UniqueName: \"kubernetes.io/projected/f73803d0-ec9b-4483-a509-7bff9afb1d85-kube-api-access-9zd9q\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.311273 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.311400 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.311978 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.312045 4787 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c925c65-213d-4981-83f5-55a4946c69e0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.413409 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zd9q\" (UniqueName: \"kubernetes.io/projected/f73803d0-ec9b-4483-a509-7bff9afb1d85-kube-api-access-9zd9q\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.413940 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.413991 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.418735 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.419898 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.433831 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zd9q\" (UniqueName: \"kubernetes.io/projected/f73803d0-ec9b-4483-a509-7bff9afb1d85-kube-api-access-9zd9q\") pod \"nova-cell1-conductor-0\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") " pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.547968 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.562744 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.585207 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-6vrjj"] Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.585750 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.585987 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.611697 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b4f5fc4f-6vrjj"] Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.622335 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k94md\" (UniqueName: \"kubernetes.io/projected/3c0e8878-777a-4637-906a-c23cd622a9ee-kube-api-access-k94md\") pod \"3c0e8878-777a-4637-906a-c23cd622a9ee\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.622394 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-config-data\") pod \"3c0e8878-777a-4637-906a-c23cd622a9ee\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.622475 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-scripts\") pod \"3c0e8878-777a-4637-906a-c23cd622a9ee\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.622529 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-combined-ca-bundle\") pod \"3c0e8878-777a-4637-906a-c23cd622a9ee\" (UID: \"3c0e8878-777a-4637-906a-c23cd622a9ee\") " Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.628322 4787 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-scripts" (OuterVolumeSpecName: "scripts") pod "3c0e8878-777a-4637-906a-c23cd622a9ee" (UID: "3c0e8878-777a-4637-906a-c23cd622a9ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.628393 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c0e8878-777a-4637-906a-c23cd622a9ee-kube-api-access-k94md" (OuterVolumeSpecName: "kube-api-access-k94md") pod "3c0e8878-777a-4637-906a-c23cd622a9ee" (UID: "3c0e8878-777a-4637-906a-c23cd622a9ee"). InnerVolumeSpecName "kube-api-access-k94md". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.667692 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-config-data" (OuterVolumeSpecName: "config-data") pod "3c0e8878-777a-4637-906a-c23cd622a9ee" (UID: "3c0e8878-777a-4637-906a-c23cd622a9ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.675734 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c0e8878-777a-4637-906a-c23cd622a9ee" (UID: "3c0e8878-777a-4637-906a-c23cd622a9ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.725375 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k94md\" (UniqueName: \"kubernetes.io/projected/3c0e8878-777a-4637-906a-c23cd622a9ee-kube-api-access-k94md\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.727710 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.727728 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:19 crc kubenswrapper[4787]: I0129 13:39:19.727739 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c0e8878-777a-4637-906a-c23cd622a9ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.022021 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c925c65-213d-4981-83f5-55a4946c69e0" path="/var/lib/kubelet/pods/6c925c65-213d-4981-83f5-55a4946c69e0/volumes" Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.091786 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.186599 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-nvhcw" event={"ID":"3c0e8878-777a-4637-906a-c23cd622a9ee","Type":"ContainerDied","Data":"ef6817e5d3101342391d06a8f3b98c03c29c7799e14d4c1f162fad0bee06b76a"} Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.186639 4787 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef6817e5d3101342391d06a8f3b98c03c29c7799e14d4c1f162fad0bee06b76a" Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.186691 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-nvhcw" Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.193360 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f73803d0-ec9b-4483-a509-7bff9afb1d85","Type":"ContainerStarted","Data":"3c4253f6e6cfc2c7c17384367dcdcce3e84c35a4fae868a2f8beccc85a75f330"} Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.292270 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.292592 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-log" containerID="cri-o://986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f" gracePeriod=30 Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.292655 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-api" containerID="cri-o://f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629" gracePeriod=30 Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.304053 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.304294 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a6affeb5-7b7c-450a-ae9b-e1288b44acd2" containerName="nova-scheduler-scheduler" containerID="cri-o://83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2" gracePeriod=30 Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.332697 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.332920 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerName="nova-metadata-log" containerID="cri-o://5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963" gracePeriod=30 Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.332988 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerName="nova-metadata-metadata" containerID="cri-o://6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0" gracePeriod=30 Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.458487 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.458533 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 13:39:20 crc kubenswrapper[4787]: I0129 13:39:20.890237 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.064078 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvt86\" (UniqueName: \"kubernetes.io/projected/6ef01160-2717-4bf8-bf48-f938fc1393bb-kube-api-access-rvt86\") pod \"6ef01160-2717-4bf8-bf48-f938fc1393bb\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.064214 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-config-data\") pod \"6ef01160-2717-4bf8-bf48-f938fc1393bb\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.064260 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef01160-2717-4bf8-bf48-f938fc1393bb-logs\") pod \"6ef01160-2717-4bf8-bf48-f938fc1393bb\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.064285 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-nova-metadata-tls-certs\") pod \"6ef01160-2717-4bf8-bf48-f938fc1393bb\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.064307 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-combined-ca-bundle\") pod \"6ef01160-2717-4bf8-bf48-f938fc1393bb\" (UID: \"6ef01160-2717-4bf8-bf48-f938fc1393bb\") " Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.065429 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ef01160-2717-4bf8-bf48-f938fc1393bb-logs" (OuterVolumeSpecName: "logs") pod "6ef01160-2717-4bf8-bf48-f938fc1393bb" (UID: "6ef01160-2717-4bf8-bf48-f938fc1393bb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.066084 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef01160-2717-4bf8-bf48-f938fc1393bb-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.073154 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ef01160-2717-4bf8-bf48-f938fc1393bb-kube-api-access-rvt86" (OuterVolumeSpecName: "kube-api-access-rvt86") pod "6ef01160-2717-4bf8-bf48-f938fc1393bb" (UID: "6ef01160-2717-4bf8-bf48-f938fc1393bb"). InnerVolumeSpecName "kube-api-access-rvt86". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.095243 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-config-data" (OuterVolumeSpecName: "config-data") pod "6ef01160-2717-4bf8-bf48-f938fc1393bb" (UID: "6ef01160-2717-4bf8-bf48-f938fc1393bb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.099589 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ef01160-2717-4bf8-bf48-f938fc1393bb" (UID: "6ef01160-2717-4bf8-bf48-f938fc1393bb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.141342 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "6ef01160-2717-4bf8-bf48-f938fc1393bb" (UID: "6ef01160-2717-4bf8-bf48-f938fc1393bb"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.167836 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.167878 4787 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.167897 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef01160-2717-4bf8-bf48-f938fc1393bb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.167914 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvt86\" (UniqueName: \"kubernetes.io/projected/6ef01160-2717-4bf8-bf48-f938fc1393bb-kube-api-access-rvt86\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.207245 4787 generic.go:334] "Generic (PLEG): container finished" podID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerID="6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0" exitCode=0 Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.207278 4787 generic.go:334] "Generic (PLEG): container finished" podID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerID="5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963" exitCode=143 Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.207336 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ef01160-2717-4bf8-bf48-f938fc1393bb","Type":"ContainerDied","Data":"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0"} Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.207372 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.207926 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ef01160-2717-4bf8-bf48-f938fc1393bb","Type":"ContainerDied","Data":"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963"} Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.207970 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6ef01160-2717-4bf8-bf48-f938fc1393bb","Type":"ContainerDied","Data":"613fe5bb9bb956595528dcf585df49b5e4dc8fd0ebc171b27223be8da9526b3e"} Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.207994 4787 scope.go:117] "RemoveContainer" containerID="6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.210704 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f73803d0-ec9b-4483-a509-7bff9afb1d85","Type":"ContainerStarted","Data":"8176461a98299b76193219bfaaced8b08f48e5e770c550736d32af3741ce5884"} Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.214509 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.216713 4787 generic.go:334] "Generic (PLEG): container finished" podID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerID="986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f" exitCode=143 Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.216760 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268","Type":"ContainerDied","Data":"986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f"} Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.236397 4787 scope.go:117] "RemoveContainer" containerID="5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.241128 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.241110064 podStartE2EDuration="2.241110064s" podCreationTimestamp="2026-01-29 13:39:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:21.23492158 +0000 UTC m=+1399.996181856" watchObservedRunningTime="2026-01-29 13:39:21.241110064 +0000 UTC m=+1400.002370340" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.266173 4787 scope.go:117] "RemoveContainer" containerID="6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.266552 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:21 crc kubenswrapper[4787]: E0129 13:39:21.266626 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0\": container with ID starting with 6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0 not found: ID does not exist" containerID="6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.266655 4787 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0"} err="failed to get container status \"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0\": rpc error: code = NotFound desc = could not find container \"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0\": container with ID starting with 6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0 not found: ID does not exist" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.266677 4787 scope.go:117] "RemoveContainer" containerID="5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963" Jan 29 13:39:21 crc kubenswrapper[4787]: E0129 13:39:21.267091 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963\": container with ID starting with 5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963 not found: ID does not exist" containerID="5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.267128 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963"} err="failed to get container status \"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963\": rpc error: code = NotFound desc = could not find container \"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963\": container with ID starting with 5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963 not found: ID does not exist" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.267147 4787 scope.go:117] "RemoveContainer" containerID="6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.267490 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0"} err="failed to get container status \"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0\": rpc error: code = NotFound desc = could not find container \"6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0\": container with ID starting with 6590a7755a6c4b5cbe8faf62f812408236d09e33216b57bf519b5209ea10b3c0 not found: ID does not exist" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.267514 4787 scope.go:117] "RemoveContainer" containerID="5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.267892 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963"} err="failed to get container status \"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963\": rpc error: code = NotFound desc = could not find container \"5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963\": container with ID starting with 5165704c21b558b418651ce6d62987d8dc36396592a604b91a5f5e38f5f0f963 not found: ID does not exist" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.280828 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.292011 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 
13:39:21 crc kubenswrapper[4787]: E0129 13:39:21.292526 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c0e8878-777a-4637-906a-c23cd622a9ee" containerName="nova-manage" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.292550 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c0e8878-777a-4637-906a-c23cd622a9ee" containerName="nova-manage" Jan 29 13:39:21 crc kubenswrapper[4787]: E0129 13:39:21.292585 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerName="nova-metadata-metadata" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.292595 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerName="nova-metadata-metadata" Jan 29 13:39:21 crc kubenswrapper[4787]: E0129 13:39:21.292613 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerName="nova-metadata-log" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.292621 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerName="nova-metadata-log" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.292844 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerName="nova-metadata-log" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.292879 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c0e8878-777a-4637-906a-c23cd622a9ee" containerName="nova-manage" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.292900 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" containerName="nova-metadata-metadata" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.294241 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.298314 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.298344 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.304186 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.381272 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9addcb14-d878-4c6e-bac1-be3e5393fb8e-logs\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.381306 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vtl2\" (UniqueName: \"kubernetes.io/projected/9addcb14-d878-4c6e-bac1-be3e5393fb8e-kube-api-access-5vtl2\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.381333 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.381506 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-config-data\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.381852 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.484018 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9addcb14-d878-4c6e-bac1-be3e5393fb8e-logs\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.484074 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vtl2\" (UniqueName: \"kubernetes.io/projected/9addcb14-d878-4c6e-bac1-be3e5393fb8e-kube-api-access-5vtl2\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.484099 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " 
pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.484155 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-config-data\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.484310 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.484822 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9addcb14-d878-4c6e-bac1-be3e5393fb8e-logs\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.487870 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.488508 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.489855 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-config-data\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.511828 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vtl2\" (UniqueName: \"kubernetes.io/projected/9addcb14-d878-4c6e-bac1-be3e5393fb8e-kube-api-access-5vtl2\") pod \"nova-metadata-0\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") " pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.612173 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:39:21 crc kubenswrapper[4787]: I0129 13:39:21.996287 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ef01160-2717-4bf8-bf48-f938fc1393bb" path="/var/lib/kubelet/pods/6ef01160-2717-4bf8-bf48-f938fc1393bb/volumes" Jan 29 13:39:22 crc kubenswrapper[4787]: I0129 13:39:22.049685 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:39:22 crc kubenswrapper[4787]: I0129 13:39:22.227625 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9addcb14-d878-4c6e-bac1-be3e5393fb8e","Type":"ContainerStarted","Data":"a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945"} Jan 29 13:39:22 crc kubenswrapper[4787]: I0129 13:39:22.227976 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9addcb14-d878-4c6e-bac1-be3e5393fb8e","Type":"ContainerStarted","Data":"8285dfc944598c5811aaa76bdbd24ef65fcab5e3d83912b3aa909a3ea9dbc16e"} Jan 29 13:39:23 crc kubenswrapper[4787]: E0129 13:39:23.070859 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:39:23 crc kubenswrapper[4787]: E0129 13:39:23.073522 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:39:23 crc kubenswrapper[4787]: E0129 13:39:23.075109 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:39:23 crc kubenswrapper[4787]: E0129 13:39:23.075191 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="a6affeb5-7b7c-450a-ae9b-e1288b44acd2" containerName="nova-scheduler-scheduler" Jan 29 13:39:23 crc kubenswrapper[4787]: I0129 13:39:23.239210 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9addcb14-d878-4c6e-bac1-be3e5393fb8e","Type":"ContainerStarted","Data":"117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9"} Jan 29 13:39:23 crc kubenswrapper[4787]: I0129 13:39:23.266214 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.266192651 podStartE2EDuration="2.266192651s" podCreationTimestamp="2026-01-29 13:39:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:23.263384712 +0000 UTC m=+1402.024644998" watchObservedRunningTime="2026-01-29 13:39:23.266192651 +0000 UTC m=+1402.027452937" Jan 29 13:39:24 crc kubenswrapper[4787]: I0129 13:39:24.888100 4787 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.017171 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.057641 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7hq5\" (UniqueName: \"kubernetes.io/projected/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-kube-api-access-w7hq5\") pod \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.057724 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-combined-ca-bundle\") pod \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.057808 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-config-data\") pod \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\" (UID: \"a6affeb5-7b7c-450a-ae9b-e1288b44acd2\") " Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.070724 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-kube-api-access-w7hq5" (OuterVolumeSpecName: "kube-api-access-w7hq5") pod "a6affeb5-7b7c-450a-ae9b-e1288b44acd2" (UID: "a6affeb5-7b7c-450a-ae9b-e1288b44acd2"). InnerVolumeSpecName "kube-api-access-w7hq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.087800 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6affeb5-7b7c-450a-ae9b-e1288b44acd2" (UID: "a6affeb5-7b7c-450a-ae9b-e1288b44acd2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.095807 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-config-data" (OuterVolumeSpecName: "config-data") pod "a6affeb5-7b7c-450a-ae9b-e1288b44acd2" (UID: "a6affeb5-7b7c-450a-ae9b-e1288b44acd2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.160489 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-combined-ca-bundle\") pod \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.160624 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-config-data\") pod \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.160719 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfztq\" (UniqueName: \"kubernetes.io/projected/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-kube-api-access-kfztq\") pod \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.160823 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-logs\") pod \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\" (UID: \"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268\") " Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.162597 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.162631 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7hq5\" (UniqueName: \"kubernetes.io/projected/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-kube-api-access-w7hq5\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.162645 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6affeb5-7b7c-450a-ae9b-e1288b44acd2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.162784 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-logs" (OuterVolumeSpecName: "logs") pod "f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" (UID: "f2cf63d5-d5ba-46e9-ae91-046bc6f8e268"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.171014 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-kube-api-access-kfztq" (OuterVolumeSpecName: "kube-api-access-kfztq") pod "f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" (UID: "f2cf63d5-d5ba-46e9-ae91-046bc6f8e268"). InnerVolumeSpecName "kube-api-access-kfztq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.206138 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" (UID: "f2cf63d5-d5ba-46e9-ae91-046bc6f8e268"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.207872 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-config-data" (OuterVolumeSpecName: "config-data") pod "f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" (UID: "f2cf63d5-d5ba-46e9-ae91-046bc6f8e268"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.262683 4787 generic.go:334] "Generic (PLEG): container finished" podID="a6affeb5-7b7c-450a-ae9b-e1288b44acd2" containerID="83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2" exitCode=0 Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.262799 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.262805 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a6affeb5-7b7c-450a-ae9b-e1288b44acd2","Type":"ContainerDied","Data":"83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2"} Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.263027 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a6affeb5-7b7c-450a-ae9b-e1288b44acd2","Type":"ContainerDied","Data":"d3cb9ffc310f6526ea721f472048c197c287487f007f39b8a026837c7b928631"} Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.263074 4787 scope.go:117] "RemoveContainer" containerID="83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.264705 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.269898 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.269955 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.269979 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfztq\" (UniqueName: \"kubernetes.io/projected/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268-kube-api-access-kfztq\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.283302 4787 generic.go:334] "Generic (PLEG): container finished" podID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerID="f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629" exitCode=0 Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.283352 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268","Type":"ContainerDied","Data":"f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629"} Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.283383 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"f2cf63d5-d5ba-46e9-ae91-046bc6f8e268","Type":"ContainerDied","Data":"bcb423f1e21d386ecb316e56e22d1751c9d7ea62a57b5acca7f4fb2d9d365969"} Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.283569 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.290925 4787 scope.go:117] "RemoveContainer" containerID="83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2" Jan 29 13:39:25 crc kubenswrapper[4787]: E0129 13:39:25.291774 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2\": container with ID starting with 83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2 not found: ID does not exist" containerID="83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.291821 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2"} err="failed to get container status \"83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2\": rpc error: code = NotFound desc = could not find container \"83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2\": container with ID starting with 83a2fb9e2df6a318c71e9137d8878452cd584327f46cf9e7ff83fe90e48d5ab2 not found: ID does not exist" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.291858 4787 scope.go:117] "RemoveContainer" containerID="f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.331618 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.343648 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.349702 4787 scope.go:117] "RemoveContainer" containerID="986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.355280 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.363997 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:25 crc kubenswrapper[4787]: E0129 13:39:25.365575 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-log" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.365840 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-log" Jan 29 13:39:25 crc kubenswrapper[4787]: E0129 13:39:25.365995 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6affeb5-7b7c-450a-ae9b-e1288b44acd2" containerName="nova-scheduler-scheduler" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.366108 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6affeb5-7b7c-450a-ae9b-e1288b44acd2" containerName="nova-scheduler-scheduler" Jan 29 13:39:25 crc kubenswrapper[4787]: E0129 13:39:25.366227 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-api" Jan 29 13:39:25 crc 
kubenswrapper[4787]: I0129 13:39:25.366545 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-api" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.366964 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6affeb5-7b7c-450a-ae9b-e1288b44acd2" containerName="nova-scheduler-scheduler" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.367087 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-log" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.367415 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" containerName="nova-api-api" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.368673 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.372393 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.395644 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.398808 4787 scope.go:117] "RemoveContainer" containerID="f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629" Jan 29 13:39:25 crc kubenswrapper[4787]: E0129 13:39:25.399393 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629\": container with ID starting with f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629 not found: ID does not exist" containerID="f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.399440 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629"} err="failed to get container status \"f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629\": rpc error: code = NotFound desc = could not find container \"f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629\": container with ID starting with f9d5f756a0b7cb8d12c5a791283ff5aaf66e0b7f1dc59acfd433a48186624629 not found: ID does not exist" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.399583 4787 scope.go:117] "RemoveContainer" containerID="986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f" Jan 29 13:39:25 crc kubenswrapper[4787]: E0129 13:39:25.400309 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f\": container with ID starting with 986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f not found: ID does not exist" containerID="986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.400416 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f"} err="failed to get container status \"986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f\": rpc error: code = NotFound desc = could not find container 
\"986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f\": container with ID starting with 986bbf9c1197dfb0116d51830c5685f5f5fbcdc27dc537bfdb406cd94c32939f not found: ID does not exist" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.409085 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.424299 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.426189 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.428807 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.453592 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.473045 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.473124 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-config-data\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.473181 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56rz7\" (UniqueName: \"kubernetes.io/projected/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-kube-api-access-56rz7\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.574314 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56rz7\" (UniqueName: \"kubernetes.io/projected/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-kube-api-access-56rz7\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.574651 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-logs\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.574680 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ptdh\" (UniqueName: \"kubernetes.io/projected/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-kube-api-access-2ptdh\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.574741 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.574785 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-config-data\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.574848 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.574909 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-config-data\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.579001 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-config-data\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.579209 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.588849 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56rz7\" (UniqueName: \"kubernetes.io/projected/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-kube-api-access-56rz7\") pod \"nova-scheduler-0\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.675965 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-config-data\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.676121 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-logs\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.676144 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ptdh\" (UniqueName: \"kubernetes.io/projected/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-kube-api-access-2ptdh\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.676165 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-combined-ca-bundle\") pod 
\"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.677041 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-logs\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.680491 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.681153 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-config-data\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.697302 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.700568 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ptdh\" (UniqueName: \"kubernetes.io/projected/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-kube-api-access-2ptdh\") pod \"nova-api-0\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.751795 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:25 crc kubenswrapper[4787]: I0129 13:39:25.997620 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6affeb5-7b7c-450a-ae9b-e1288b44acd2" path="/var/lib/kubelet/pods/a6affeb5-7b7c-450a-ae9b-e1288b44acd2/volumes" Jan 29 13:39:26 crc kubenswrapper[4787]: I0129 13:39:25.999867 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2cf63d5-d5ba-46e9-ae91-046bc6f8e268" path="/var/lib/kubelet/pods/f2cf63d5-d5ba-46e9-ae91-046bc6f8e268/volumes" Jan 29 13:39:26 crc kubenswrapper[4787]: I0129 13:39:26.212593 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:39:26 crc kubenswrapper[4787]: I0129 13:39:26.315616 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:26 crc kubenswrapper[4787]: I0129 13:39:26.317799 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7","Type":"ContainerStarted","Data":"1041c45faac67130e2b5ec92ac5524d36b36e1006a5001dab346515d01256875"} Jan 29 13:39:26 crc kubenswrapper[4787]: I0129 13:39:26.612979 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 13:39:26 crc kubenswrapper[4787]: I0129 13:39:26.613032 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 13:39:27 crc kubenswrapper[4787]: I0129 13:39:27.330008 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7","Type":"ContainerStarted","Data":"b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99"} Jan 29 13:39:27 crc 
kubenswrapper[4787]: I0129 13:39:27.332321 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56","Type":"ContainerStarted","Data":"8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8"} Jan 29 13:39:27 crc kubenswrapper[4787]: I0129 13:39:27.332367 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56","Type":"ContainerStarted","Data":"69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d"} Jan 29 13:39:27 crc kubenswrapper[4787]: I0129 13:39:27.332385 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56","Type":"ContainerStarted","Data":"bd2ba9f3dca132a7c00fd4b09b79dff3b7bd4bbf7244156c5fd71d943f98b139"} Jan 29 13:39:27 crc kubenswrapper[4787]: I0129 13:39:27.352325 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.352307093 podStartE2EDuration="2.352307093s" podCreationTimestamp="2026-01-29 13:39:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:27.345533303 +0000 UTC m=+1406.106793609" watchObservedRunningTime="2026-01-29 13:39:27.352307093 +0000 UTC m=+1406.113567379" Jan 29 13:39:27 crc kubenswrapper[4787]: I0129 13:39:27.375651 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.375632978 podStartE2EDuration="2.375632978s" podCreationTimestamp="2026-01-29 13:39:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:27.36822436 +0000 UTC m=+1406.129484666" watchObservedRunningTime="2026-01-29 13:39:27.375632978 +0000 UTC m=+1406.136893264" Jan 29 13:39:29 crc kubenswrapper[4787]: I0129 13:39:29.633302 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 29 13:39:30 crc kubenswrapper[4787]: I0129 13:39:30.697947 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 13:39:31 crc kubenswrapper[4787]: I0129 13:39:31.613322 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 13:39:31 crc kubenswrapper[4787]: I0129 13:39:31.613385 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 13:39:32 crc kubenswrapper[4787]: I0129 13:39:32.631815 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 13:39:32 crc kubenswrapper[4787]: I0129 13:39:32.631816 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 13:39:33 crc kubenswrapper[4787]: I0129 13:39:33.530476 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/ceilometer-0" Jan 29 13:39:35 crc kubenswrapper[4787]: I0129 13:39:35.697517 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 13:39:35 crc kubenswrapper[4787]: I0129 13:39:35.742111 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 13:39:35 crc kubenswrapper[4787]: I0129 13:39:35.751980 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:39:35 crc kubenswrapper[4787]: I0129 13:39:35.752200 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:39:36 crc kubenswrapper[4787]: I0129 13:39:36.446752 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 13:39:36 crc kubenswrapper[4787]: I0129 13:39:36.834769 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 13:39:36 crc kubenswrapper[4787]: I0129 13:39:36.835625 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 29 13:39:37 crc kubenswrapper[4787]: I0129 13:39:37.752136 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:39:37 crc kubenswrapper[4787]: I0129 13:39:37.752687 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="baf39877-8374-4f5a-91a6-60b55b5d6514" containerName="kube-state-metrics" containerID="cri-o://3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9" gracePeriod=30 Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.305539 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.419668 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5h6d\" (UniqueName: \"kubernetes.io/projected/baf39877-8374-4f5a-91a6-60b55b5d6514-kube-api-access-r5h6d\") pod \"baf39877-8374-4f5a-91a6-60b55b5d6514\" (UID: \"baf39877-8374-4f5a-91a6-60b55b5d6514\") " Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.435299 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baf39877-8374-4f5a-91a6-60b55b5d6514-kube-api-access-r5h6d" (OuterVolumeSpecName: "kube-api-access-r5h6d") pod "baf39877-8374-4f5a-91a6-60b55b5d6514" (UID: "baf39877-8374-4f5a-91a6-60b55b5d6514"). InnerVolumeSpecName "kube-api-access-r5h6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.439294 4787 generic.go:334] "Generic (PLEG): container finished" podID="baf39877-8374-4f5a-91a6-60b55b5d6514" containerID="3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9" exitCode=2 Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.439356 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.439383 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"baf39877-8374-4f5a-91a6-60b55b5d6514","Type":"ContainerDied","Data":"3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9"} Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.439520 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"baf39877-8374-4f5a-91a6-60b55b5d6514","Type":"ContainerDied","Data":"9877b5ffc153f60bcd31450a3631fe086ad55d52c2598b7bb3be1fa68af9fbcf"} Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.439567 4787 scope.go:117] "RemoveContainer" containerID="3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.489396 4787 scope.go:117] "RemoveContainer" containerID="3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9" Jan 29 13:39:38 crc kubenswrapper[4787]: E0129 13:39:38.491623 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9\": container with ID starting with 3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9 not found: ID does not exist" containerID="3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.491673 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9"} err="failed to get container status \"3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9\": rpc error: code = NotFound desc = could not find container \"3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9\": container with ID starting with 3567449658dee378d0720da514d98795b8aeec5d506a95693d24fc856ac0e5b9 not found: ID does not exist" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.523340 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5h6d\" (UniqueName: \"kubernetes.io/projected/baf39877-8374-4f5a-91a6-60b55b5d6514-kube-api-access-r5h6d\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.526090 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.538668 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.550442 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:39:38 crc kubenswrapper[4787]: E0129 13:39:38.550863 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baf39877-8374-4f5a-91a6-60b55b5d6514" containerName="kube-state-metrics" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.550878 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="baf39877-8374-4f5a-91a6-60b55b5d6514" containerName="kube-state-metrics" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.551058 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="baf39877-8374-4f5a-91a6-60b55b5d6514" containerName="kube-state-metrics" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.551664 4787 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.554156 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.554326 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.576657 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.624986 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.625075 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.625147 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.625228 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz5nf\" (UniqueName: \"kubernetes.io/projected/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-api-access-hz5nf\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.726615 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz5nf\" (UniqueName: \"kubernetes.io/projected/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-api-access-hz5nf\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.726695 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.726767 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.726829 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.732793 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.740253 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.740372 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.760680 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz5nf\" (UniqueName: \"kubernetes.io/projected/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-api-access-hz5nf\") pod \"kube-state-metrics-0\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " pod="openstack/kube-state-metrics-0" Jan 29 13:39:38 crc kubenswrapper[4787]: I0129 13:39:38.875263 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 13:39:39 crc kubenswrapper[4787]: I0129 13:39:39.316274 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:39:39 crc kubenswrapper[4787]: W0129 13:39:39.322381 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd21a5fb3_2d4b_4b53_8fe6_45fe636362b4.slice/crio-086323df70f3f8b9810725ffa69a96b1490cf46097669d3fecec1553fbcd66aa WatchSource:0}: Error finding container 086323df70f3f8b9810725ffa69a96b1490cf46097669d3fecec1553fbcd66aa: Status 404 returned error can't find the container with id 086323df70f3f8b9810725ffa69a96b1490cf46097669d3fecec1553fbcd66aa Jan 29 13:39:39 crc kubenswrapper[4787]: I0129 13:39:39.452364 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4","Type":"ContainerStarted","Data":"086323df70f3f8b9810725ffa69a96b1490cf46097669d3fecec1553fbcd66aa"} Jan 29 13:39:39 crc kubenswrapper[4787]: I0129 13:39:39.755939 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:39 crc kubenswrapper[4787]: I0129 13:39:39.756288 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="ceilometer-central-agent" containerID="cri-o://920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859" gracePeriod=30 Jan 29 13:39:39 crc kubenswrapper[4787]: I0129 13:39:39.756371 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="proxy-httpd" containerID="cri-o://f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7" gracePeriod=30 Jan 29 13:39:39 crc kubenswrapper[4787]: I0129 13:39:39.756400 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="sg-core" containerID="cri-o://ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee" gracePeriod=30 Jan 29 13:39:39 crc kubenswrapper[4787]: I0129 13:39:39.756403 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="ceilometer-notification-agent" containerID="cri-o://f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55" gracePeriod=30 Jan 29 13:39:39 crc kubenswrapper[4787]: I0129 13:39:39.997205 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baf39877-8374-4f5a-91a6-60b55b5d6514" path="/var/lib/kubelet/pods/baf39877-8374-4f5a-91a6-60b55b5d6514/volumes" Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.462120 4787 generic.go:334] "Generic (PLEG): container finished" podID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerID="f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7" exitCode=0 Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.462157 4787 generic.go:334] "Generic (PLEG): container finished" podID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerID="ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee" exitCode=2 Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.462173 4787 generic.go:334] "Generic (PLEG): container finished" podID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" 
containerID="920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859" exitCode=0 Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.462181 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerDied","Data":"f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7"} Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.462215 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerDied","Data":"ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee"} Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.462227 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerDied","Data":"920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859"} Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.463819 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4","Type":"ContainerStarted","Data":"13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85"} Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.463943 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 29 13:39:40 crc kubenswrapper[4787]: I0129 13:39:40.477412 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.1136332429999998 podStartE2EDuration="2.47739248s" podCreationTimestamp="2026-01-29 13:39:38 +0000 UTC" firstStartedPulling="2026-01-29 13:39:39.325587489 +0000 UTC m=+1418.086847765" lastFinishedPulling="2026-01-29 13:39:39.689346726 +0000 UTC m=+1418.450607002" observedRunningTime="2026-01-29 13:39:40.475131167 +0000 UTC m=+1419.236391463" watchObservedRunningTime="2026-01-29 13:39:40.47739248 +0000 UTC m=+1419.238652746" Jan 29 13:39:41 crc kubenswrapper[4787]: I0129 13:39:41.621226 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 13:39:41 crc kubenswrapper[4787]: I0129 13:39:41.624789 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 13:39:41 crc kubenswrapper[4787]: I0129 13:39:41.646347 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 13:39:42 crc kubenswrapper[4787]: I0129 13:39:42.486731 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.313895 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.419966 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-config-data\") pod \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.420037 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-log-httpd\") pod \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.420080 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x8jd\" (UniqueName: \"kubernetes.io/projected/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-kube-api-access-9x8jd\") pod \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.420130 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-combined-ca-bundle\") pod \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.420174 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-sg-core-conf-yaml\") pod \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.420203 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-scripts\") pod \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.420252 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-run-httpd\") pod \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\" (UID: \"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0\") " Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.420771 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" (UID: "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.420863 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" (UID: "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.427817 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-scripts" (OuterVolumeSpecName: "scripts") pod "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" (UID: "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.428491 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-kube-api-access-9x8jd" (OuterVolumeSpecName: "kube-api-access-9x8jd") pod "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" (UID: "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0"). InnerVolumeSpecName "kube-api-access-9x8jd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.453406 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" (UID: "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.490374 4787 generic.go:334] "Generic (PLEG): container finished" podID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerID="f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55" exitCode=0 Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.490487 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.490537 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerDied","Data":"f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55"} Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.490573 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bcf9827-eefc-44fe-9a1d-237c95b9dbd0","Type":"ContainerDied","Data":"973f3d6d15dbabea927220affdcb15d7d42bf3c4f7fee78d8aba4c695eeda94c"} Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.490590 4787 scope.go:117] "RemoveContainer" containerID="f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.505123 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" (UID: "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.522052 4787 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.522277 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x8jd\" (UniqueName: \"kubernetes.io/projected/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-kube-api-access-9x8jd\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.522367 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.522449 4787 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.522548 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.522616 4787 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.522951 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-config-data" (OuterVolumeSpecName: "config-data") pod "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" (UID: "7bcf9827-eefc-44fe-9a1d-237c95b9dbd0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.624757 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.844447 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.859564 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.864510 4787 scope.go:117] "RemoveContainer" containerID="ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.872713 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:43 crc kubenswrapper[4787]: E0129 13:39:43.873112 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="proxy-httpd" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.873129 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="proxy-httpd" Jan 29 13:39:43 crc kubenswrapper[4787]: E0129 13:39:43.873140 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="sg-core" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.873145 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="sg-core" Jan 29 13:39:43 crc kubenswrapper[4787]: E0129 13:39:43.873156 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="ceilometer-notification-agent" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.873162 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="ceilometer-notification-agent" Jan 29 13:39:43 crc kubenswrapper[4787]: E0129 13:39:43.873178 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="ceilometer-central-agent" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.873184 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="ceilometer-central-agent" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.873353 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="ceilometer-central-agent" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.873369 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="sg-core" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.873390 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="ceilometer-notification-agent" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.873401 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" containerName="proxy-httpd" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.875075 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.878779 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.879556 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.879836 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.891569 4787 scope.go:117] "RemoveContainer" containerID="f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.902884 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.913569 4787 scope.go:117] "RemoveContainer" containerID="920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.935298 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-scripts\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.935345 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-log-httpd\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.935378 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.935408 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-config-data\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.935421 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.935477 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-run-httpd\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.935499 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bnl7\" (UniqueName: 
\"kubernetes.io/projected/7757e8f4-8b3e-4744-b609-cd4a09035507-kube-api-access-5bnl7\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.935550 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.940945 4787 scope.go:117] "RemoveContainer" containerID="f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7" Jan 29 13:39:43 crc kubenswrapper[4787]: E0129 13:39:43.942936 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7\": container with ID starting with f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7 not found: ID does not exist" containerID="f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.942968 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7"} err="failed to get container status \"f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7\": rpc error: code = NotFound desc = could not find container \"f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7\": container with ID starting with f04aaddcfdaac2d68fb2458f07333332c451cdcefc2a42e1df00f19a8c4aede7 not found: ID does not exist" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.942987 4787 scope.go:117] "RemoveContainer" containerID="ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee" Jan 29 13:39:43 crc kubenswrapper[4787]: E0129 13:39:43.947926 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee\": container with ID starting with ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee not found: ID does not exist" containerID="ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.947971 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee"} err="failed to get container status \"ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee\": rpc error: code = NotFound desc = could not find container \"ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee\": container with ID starting with ba9b4633057d125c2846cbae99f78133100d17d701bb64db126ea83f8a03bcee not found: ID does not exist" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.947999 4787 scope.go:117] "RemoveContainer" containerID="f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55" Jan 29 13:39:43 crc kubenswrapper[4787]: E0129 13:39:43.948668 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55\": container with ID starting with 
f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55 not found: ID does not exist" containerID="f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.948690 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55"} err="failed to get container status \"f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55\": rpc error: code = NotFound desc = could not find container \"f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55\": container with ID starting with f3e7b58aff408ec966ff1710ea416e24d0c3abcb309a908d477329d2a012cc55 not found: ID does not exist" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.948707 4787 scope.go:117] "RemoveContainer" containerID="920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859" Jan 29 13:39:43 crc kubenswrapper[4787]: E0129 13:39:43.948941 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859\": container with ID starting with 920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859 not found: ID does not exist" containerID="920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.948959 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859"} err="failed to get container status \"920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859\": rpc error: code = NotFound desc = could not find container \"920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859\": container with ID starting with 920b10445ba573c6740269811119234681ec0069bc5ca010e494e0652758c859 not found: ID does not exist" Jan 29 13:39:43 crc kubenswrapper[4787]: I0129 13:39:43.997805 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bcf9827-eefc-44fe-9a1d-237c95b9dbd0" path="/var/lib/kubelet/pods/7bcf9827-eefc-44fe-9a1d-237c95b9dbd0/volumes" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.038301 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.038379 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-scripts\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.038404 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-log-httpd\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.038431 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.038473 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-config-data\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.038487 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.038527 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-run-httpd\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.038549 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bnl7\" (UniqueName: \"kubernetes.io/projected/7757e8f4-8b3e-4744-b609-cd4a09035507-kube-api-access-5bnl7\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.039384 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-run-httpd\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.039431 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-log-httpd\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.042168 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.045784 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.046038 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-scripts\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.046235 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.046434 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-config-data\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.062094 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bnl7\" (UniqueName: \"kubernetes.io/projected/7757e8f4-8b3e-4744-b609-cd4a09035507-kube-api-access-5bnl7\") pod \"ceilometer-0\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.192296 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.364716 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.451359 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-config-data\") pod \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.451406 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-combined-ca-bundle\") pod \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.451503 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjrh2\" (UniqueName: \"kubernetes.io/projected/9881f277-909c-4aaa-b3a0-97abadeb2ccf-kube-api-access-hjrh2\") pod \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\" (UID: \"9881f277-909c-4aaa-b3a0-97abadeb2ccf\") " Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.459205 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9881f277-909c-4aaa-b3a0-97abadeb2ccf-kube-api-access-hjrh2" (OuterVolumeSpecName: "kube-api-access-hjrh2") pod "9881f277-909c-4aaa-b3a0-97abadeb2ccf" (UID: "9881f277-909c-4aaa-b3a0-97abadeb2ccf"). InnerVolumeSpecName "kube-api-access-hjrh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.480304 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-config-data" (OuterVolumeSpecName: "config-data") pod "9881f277-909c-4aaa-b3a0-97abadeb2ccf" (UID: "9881f277-909c-4aaa-b3a0-97abadeb2ccf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.480781 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9881f277-909c-4aaa-b3a0-97abadeb2ccf" (UID: "9881f277-909c-4aaa-b3a0-97abadeb2ccf"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.503661 4787 generic.go:334] "Generic (PLEG): container finished" podID="9881f277-909c-4aaa-b3a0-97abadeb2ccf" containerID="a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6" exitCode=137 Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.504345 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.504641 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9881f277-909c-4aaa-b3a0-97abadeb2ccf","Type":"ContainerDied","Data":"a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6"} Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.504700 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9881f277-909c-4aaa-b3a0-97abadeb2ccf","Type":"ContainerDied","Data":"49677c9884e42089fdd61071a4dd379ea9266eebb8f1d112c5be7db25d538705"} Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.504721 4787 scope.go:117] "RemoveContainer" containerID="a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.531235 4787 scope.go:117] "RemoveContainer" containerID="a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6" Jan 29 13:39:44 crc kubenswrapper[4787]: E0129 13:39:44.531656 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6\": container with ID starting with a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6 not found: ID does not exist" containerID="a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.531697 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6"} err="failed to get container status \"a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6\": rpc error: code = NotFound desc = could not find container \"a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6\": container with ID starting with a4c709bdf0065d564b63cc9316193e97da88b2b7a8b14e8399c230b7cfebd9b6 not found: ID does not exist" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.561949 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.562009 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9881f277-909c-4aaa-b3a0-97abadeb2ccf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.562022 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjrh2\" (UniqueName: \"kubernetes.io/projected/9881f277-909c-4aaa-b3a0-97abadeb2ccf-kube-api-access-hjrh2\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.565519 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 
13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.591472 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.602010 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:39:44 crc kubenswrapper[4787]: E0129 13:39:44.602354 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9881f277-909c-4aaa-b3a0-97abadeb2ccf" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.602374 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9881f277-909c-4aaa-b3a0-97abadeb2ccf" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.602586 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9881f277-909c-4aaa-b3a0-97abadeb2ccf" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.603167 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.605160 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.605561 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.610944 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.612203 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.677364 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:44 crc kubenswrapper[4787]: W0129 13:39:44.677815 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7757e8f4_8b3e_4744_b609_cd4a09035507.slice/crio-4138dac0e46314455f5faaf83ab286ebafae7be5cafc903b7ed944a42e3403cf WatchSource:0}: Error finding container 4138dac0e46314455f5faaf83ab286ebafae7be5cafc903b7ed944a42e3403cf: Status 404 returned error can't find the container with id 4138dac0e46314455f5faaf83ab286ebafae7be5cafc903b7ed944a42e3403cf Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.680622 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.764971 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sds94\" (UniqueName: \"kubernetes.io/projected/ab278964-ff72-4353-b454-9587f235c492-kube-api-access-sds94\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.765072 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 
13:39:44.765097 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.765131 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.765161 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.866892 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.866951 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.867029 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sds94\" (UniqueName: \"kubernetes.io/projected/ab278964-ff72-4353-b454-9587f235c492-kube-api-access-sds94\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.867101 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.867121 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.871132 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.871546 4787 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.871985 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.873248 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.888141 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sds94\" (UniqueName: \"kubernetes.io/projected/ab278964-ff72-4353-b454-9587f235c492-kube-api-access-sds94\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:44 crc kubenswrapper[4787]: I0129 13:39:44.921803 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.368272 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.522537 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerStarted","Data":"f4681dbb3360537e4ba3a633ca1cbad50ac4003cd13da34b3a44534bb7231957"} Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.522602 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerStarted","Data":"4138dac0e46314455f5faaf83ab286ebafae7be5cafc903b7ed944a42e3403cf"} Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.525372 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ab278964-ff72-4353-b454-9587f235c492","Type":"ContainerStarted","Data":"3a40a7418a1a2b18aade4acbc66b2daaae6cb310e1db6ef891ea602f4149f10c"} Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.757783 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.757865 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.758394 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.758446 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.765859 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.768199 4787 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.943845 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-4q8zf"] Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.956520 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:45 crc kubenswrapper[4787]: I0129 13:39:45.961396 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-4q8zf"] Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.007628 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9881f277-909c-4aaa-b3a0-97abadeb2ccf" path="/var/lib/kubelet/pods/9881f277-909c-4aaa-b3a0-97abadeb2ccf/volumes" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.086896 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-config\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.086944 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.086962 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.086983 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qptsp\" (UniqueName: \"kubernetes.io/projected/aa154084-240a-486e-9eb1-21620d97ec8d-kube-api-access-qptsp\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.087030 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-sb\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.087049 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-svc\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.188166 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-sb\") 
pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.188214 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-svc\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.188377 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-config\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.188415 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.188432 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.188452 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qptsp\" (UniqueName: \"kubernetes.io/projected/aa154084-240a-486e-9eb1-21620d97ec8d-kube-api-access-qptsp\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.189252 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-config\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.189516 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-swift-storage-0\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.189777 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-sb\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.190147 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-nb\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " 
pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.190252 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-svc\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.226836 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qptsp\" (UniqueName: \"kubernetes.io/projected/aa154084-240a-486e-9eb1-21620d97ec8d-kube-api-access-qptsp\") pod \"dnsmasq-dns-867cd545c7-4q8zf\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.350102 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.609733 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ab278964-ff72-4353-b454-9587f235c492","Type":"ContainerStarted","Data":"a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9"} Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.620229 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerStarted","Data":"00959bbca327a09c4e21f95b96207dfcd5a47c2a01194a5d8c4b7a6b52ad9060"} Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.671767 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.671748272 podStartE2EDuration="2.671748272s" podCreationTimestamp="2026-01-29 13:39:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:46.623976821 +0000 UTC m=+1425.385237107" watchObservedRunningTime="2026-01-29 13:39:46.671748272 +0000 UTC m=+1425.433008568" Jan 29 13:39:46 crc kubenswrapper[4787]: W0129 13:39:46.850398 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa154084_240a_486e_9eb1_21620d97ec8d.slice/crio-fe52aaca207e8bab15c96c8b86d9d27b7a3d6136026caf80f3401544f646b01f WatchSource:0}: Error finding container fe52aaca207e8bab15c96c8b86d9d27b7a3d6136026caf80f3401544f646b01f: Status 404 returned error can't find the container with id fe52aaca207e8bab15c96c8b86d9d27b7a3d6136026caf80f3401544f646b01f Jan 29 13:39:46 crc kubenswrapper[4787]: I0129 13:39:46.851501 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-4q8zf"] Jan 29 13:39:47 crc kubenswrapper[4787]: I0129 13:39:47.631910 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerStarted","Data":"cbb72651e64e9fde57d223f5ed76afc29d4cccc5534bd5d8d15f36a87f38e226"} Jan 29 13:39:47 crc kubenswrapper[4787]: I0129 13:39:47.635229 4787 generic.go:334] "Generic (PLEG): container finished" podID="aa154084-240a-486e-9eb1-21620d97ec8d" containerID="246d9c4ad6eb156d29b612e0dfe0c3ea5a66d4f35eaf032b14fcc0c513cef32c" exitCode=0 Jan 29 13:39:47 crc kubenswrapper[4787]: I0129 13:39:47.636512 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" event={"ID":"aa154084-240a-486e-9eb1-21620d97ec8d","Type":"ContainerDied","Data":"246d9c4ad6eb156d29b612e0dfe0c3ea5a66d4f35eaf032b14fcc0c513cef32c"} Jan 29 13:39:47 crc kubenswrapper[4787]: I0129 13:39:47.636545 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" event={"ID":"aa154084-240a-486e-9eb1-21620d97ec8d","Type":"ContainerStarted","Data":"fe52aaca207e8bab15c96c8b86d9d27b7a3d6136026caf80f3401544f646b01f"} Jan 29 13:39:47 crc kubenswrapper[4787]: I0129 13:39:47.759347 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:48 crc kubenswrapper[4787]: I0129 13:39:48.646560 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" event={"ID":"aa154084-240a-486e-9eb1-21620d97ec8d","Type":"ContainerStarted","Data":"740be74bb4087b9bcad6b9181de441c8a1b42db5c5831a84ddf16b1021efb323"} Jan 29 13:39:48 crc kubenswrapper[4787]: I0129 13:39:48.648300 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:48 crc kubenswrapper[4787]: I0129 13:39:48.681086 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" podStartSLOduration=3.681064937 podStartE2EDuration="3.681064937s" podCreationTimestamp="2026-01-29 13:39:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:48.672764874 +0000 UTC m=+1427.434025160" watchObservedRunningTime="2026-01-29 13:39:48.681064937 +0000 UTC m=+1427.442325223" Jan 29 13:39:48 crc kubenswrapper[4787]: I0129 13:39:48.729137 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:48 crc kubenswrapper[4787]: I0129 13:39:48.729323 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-log" containerID="cri-o://69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d" gracePeriod=30 Jan 29 13:39:48 crc kubenswrapper[4787]: I0129 13:39:48.729574 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-api" containerID="cri-o://8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8" gracePeriod=30 Jan 29 13:39:48 crc kubenswrapper[4787]: I0129 13:39:48.912753 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.658037 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerStarted","Data":"568e4354bf384e8ed562bd66a577c33c88bd53d6a1fd7427082ceeb5ec9d5136"} Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.658184 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="ceilometer-central-agent" containerID="cri-o://f4681dbb3360537e4ba3a633ca1cbad50ac4003cd13da34b3a44534bb7231957" gracePeriod=30 Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.658238 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="proxy-httpd" containerID="cri-o://568e4354bf384e8ed562bd66a577c33c88bd53d6a1fd7427082ceeb5ec9d5136" gracePeriod=30 Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.658239 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="sg-core" containerID="cri-o://cbb72651e64e9fde57d223f5ed76afc29d4cccc5534bd5d8d15f36a87f38e226" gracePeriod=30 Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.658286 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="ceilometer-notification-agent" containerID="cri-o://00959bbca327a09c4e21f95b96207dfcd5a47c2a01194a5d8c4b7a6b52ad9060" gracePeriod=30 Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.658393 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.665715 4787 generic.go:334] "Generic (PLEG): container finished" podID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerID="69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d" exitCode=143 Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.665814 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56","Type":"ContainerDied","Data":"69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d"} Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.678825 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.825262059 podStartE2EDuration="6.678810455s" podCreationTimestamp="2026-01-29 13:39:43 +0000 UTC" firstStartedPulling="2026-01-29 13:39:44.680415653 +0000 UTC m=+1423.441675929" lastFinishedPulling="2026-01-29 13:39:48.533964049 +0000 UTC m=+1427.295224325" observedRunningTime="2026-01-29 13:39:49.677403106 +0000 UTC m=+1428.438663372" watchObservedRunningTime="2026-01-29 13:39:49.678810455 +0000 UTC m=+1428.440070731" Jan 29 13:39:49 crc kubenswrapper[4787]: I0129 13:39:49.922869 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:50 crc kubenswrapper[4787]: I0129 13:39:50.675954 4787 generic.go:334] "Generic (PLEG): container finished" podID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerID="568e4354bf384e8ed562bd66a577c33c88bd53d6a1fd7427082ceeb5ec9d5136" exitCode=0 Jan 29 13:39:50 crc kubenswrapper[4787]: I0129 13:39:50.675996 4787 generic.go:334] "Generic (PLEG): container finished" podID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerID="cbb72651e64e9fde57d223f5ed76afc29d4cccc5534bd5d8d15f36a87f38e226" exitCode=2 Jan 29 13:39:50 crc kubenswrapper[4787]: I0129 13:39:50.676006 4787 generic.go:334] "Generic (PLEG): container finished" podID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerID="00959bbca327a09c4e21f95b96207dfcd5a47c2a01194a5d8c4b7a6b52ad9060" exitCode=0 Jan 29 13:39:50 crc kubenswrapper[4787]: I0129 13:39:50.676026 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerDied","Data":"568e4354bf384e8ed562bd66a577c33c88bd53d6a1fd7427082ceeb5ec9d5136"} Jan 29 13:39:50 crc kubenswrapper[4787]: I0129 13:39:50.676084 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerDied","Data":"cbb72651e64e9fde57d223f5ed76afc29d4cccc5534bd5d8d15f36a87f38e226"} Jan 29 13:39:50 crc kubenswrapper[4787]: I0129 13:39:50.676103 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerDied","Data":"00959bbca327a09c4e21f95b96207dfcd5a47c2a01194a5d8c4b7a6b52ad9060"} Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.683513 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.696094 4787 generic.go:334] "Generic (PLEG): container finished" podID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerID="8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8" exitCode=0 Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.696249 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56","Type":"ContainerDied","Data":"8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8"} Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.696481 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56","Type":"ContainerDied","Data":"bd2ba9f3dca132a7c00fd4b09b79dff3b7bd4bbf7244156c5fd71d943f98b139"} Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.696506 4787 scope.go:117] "RemoveContainer" containerID="8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.696339 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.720923 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-config-data\") pod \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.721133 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-combined-ca-bundle\") pod \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.721163 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ptdh\" (UniqueName: \"kubernetes.io/projected/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-kube-api-access-2ptdh\") pod \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.721192 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-logs\") pod \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\" (UID: \"0ed1d89b-98e5-4d5b-9af8-1d22d1252a56\") " Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.731595 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-logs" (OuterVolumeSpecName: "logs") pod "0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" (UID: "0ed1d89b-98e5-4d5b-9af8-1d22d1252a56"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.732537 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.753188 4787 scope.go:117] "RemoveContainer" containerID="69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.759702 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-kube-api-access-2ptdh" (OuterVolumeSpecName: "kube-api-access-2ptdh") pod "0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" (UID: "0ed1d89b-98e5-4d5b-9af8-1d22d1252a56"). InnerVolumeSpecName "kube-api-access-2ptdh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.778390 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" (UID: "0ed1d89b-98e5-4d5b-9af8-1d22d1252a56"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.787942 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-config-data" (OuterVolumeSpecName: "config-data") pod "0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" (UID: "0ed1d89b-98e5-4d5b-9af8-1d22d1252a56"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.834056 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.834081 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ptdh\" (UniqueName: \"kubernetes.io/projected/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-kube-api-access-2ptdh\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.834092 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.836057 4787 scope.go:117] "RemoveContainer" containerID="8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8" Jan 29 13:39:52 crc kubenswrapper[4787]: E0129 13:39:52.836372 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8\": container with ID starting with 8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8 not found: ID does not exist" containerID="8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.836397 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8"} err="failed to get container status \"8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8\": rpc error: code = NotFound desc = could not find container \"8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8\": container with ID starting with 8364b68ee65c233167f3bcc4b91790acd296b7fbdb642bd83cb44543669639a8 not found: ID does not exist" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.836417 4787 scope.go:117] "RemoveContainer" containerID="69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d" Jan 29 13:39:52 crc kubenswrapper[4787]: E0129 13:39:52.836735 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d\": container with ID starting with 69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d not found: ID does not exist" containerID="69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d" Jan 29 13:39:52 crc kubenswrapper[4787]: I0129 13:39:52.836766 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d"} err="failed to get container status \"69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d\": rpc error: code = NotFound desc = could not find container \"69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d\": container with ID starting with 69ec7afc3130b848f9bea03758fc2f74168ee9d31f128f47f80929b35100230d not found: ID does not exist" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.025444 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:53 crc 
kubenswrapper[4787]: I0129 13:39:53.035167 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.055523 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:53 crc kubenswrapper[4787]: E0129 13:39:53.055974 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-api" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.055988 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-api" Jan 29 13:39:53 crc kubenswrapper[4787]: E0129 13:39:53.055998 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-log" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.056004 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-log" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.056169 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-log" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.056182 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" containerName="nova-api-api" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.057166 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.065014 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.065189 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.065288 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.080351 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.141491 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-config-data\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.141552 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.141572 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-internal-tls-certs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.141928 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-public-tls-certs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.141991 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83de8885-fd33-40f4-a515-25b5161897ec-logs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.142074 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8z2x\" (UniqueName: \"kubernetes.io/projected/83de8885-fd33-40f4-a515-25b5161897ec-kube-api-access-b8z2x\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.243559 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-public-tls-certs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.243601 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83de8885-fd33-40f4-a515-25b5161897ec-logs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.243638 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8z2x\" (UniqueName: \"kubernetes.io/projected/83de8885-fd33-40f4-a515-25b5161897ec-kube-api-access-b8z2x\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.243689 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-config-data\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.243723 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.243741 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-internal-tls-certs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.244569 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83de8885-fd33-40f4-a515-25b5161897ec-logs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.248646 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-public-tls-certs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.249788 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-internal-tls-certs\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.249849 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.250920 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-config-data\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.262853 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8z2x\" (UniqueName: \"kubernetes.io/projected/83de8885-fd33-40f4-a515-25b5161897ec-kube-api-access-b8z2x\") pod \"nova-api-0\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.386050 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.712930 4787 generic.go:334] "Generic (PLEG): container finished" podID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerID="f4681dbb3360537e4ba3a633ca1cbad50ac4003cd13da34b3a44534bb7231957" exitCode=0 Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.713120 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerDied","Data":"f4681dbb3360537e4ba3a633ca1cbad50ac4003cd13da34b3a44534bb7231957"} Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.760262 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.854054 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-sg-core-conf-yaml\") pod \"7757e8f4-8b3e-4744-b609-cd4a09035507\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.854203 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-ceilometer-tls-certs\") pod \"7757e8f4-8b3e-4744-b609-cd4a09035507\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.854231 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-run-httpd\") pod \"7757e8f4-8b3e-4744-b609-cd4a09035507\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.854255 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-scripts\") pod \"7757e8f4-8b3e-4744-b609-cd4a09035507\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.854342 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bnl7\" (UniqueName: \"kubernetes.io/projected/7757e8f4-8b3e-4744-b609-cd4a09035507-kube-api-access-5bnl7\") pod \"7757e8f4-8b3e-4744-b609-cd4a09035507\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.854443 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-combined-ca-bundle\") pod \"7757e8f4-8b3e-4744-b609-cd4a09035507\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.854521 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-config-data\") pod \"7757e8f4-8b3e-4744-b609-cd4a09035507\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.854564 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-log-httpd\") pod \"7757e8f4-8b3e-4744-b609-cd4a09035507\" (UID: \"7757e8f4-8b3e-4744-b609-cd4a09035507\") " Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.855026 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7757e8f4-8b3e-4744-b609-cd4a09035507" (UID: "7757e8f4-8b3e-4744-b609-cd4a09035507"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.855118 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7757e8f4-8b3e-4744-b609-cd4a09035507" (UID: "7757e8f4-8b3e-4744-b609-cd4a09035507"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.859298 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7757e8f4-8b3e-4744-b609-cd4a09035507-kube-api-access-5bnl7" (OuterVolumeSpecName: "kube-api-access-5bnl7") pod "7757e8f4-8b3e-4744-b609-cd4a09035507" (UID: "7757e8f4-8b3e-4744-b609-cd4a09035507"). InnerVolumeSpecName "kube-api-access-5bnl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.860672 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-scripts" (OuterVolumeSpecName: "scripts") pod "7757e8f4-8b3e-4744-b609-cd4a09035507" (UID: "7757e8f4-8b3e-4744-b609-cd4a09035507"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.883865 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7757e8f4-8b3e-4744-b609-cd4a09035507" (UID: "7757e8f4-8b3e-4744-b609-cd4a09035507"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.911837 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7757e8f4-8b3e-4744-b609-cd4a09035507" (UID: "7757e8f4-8b3e-4744-b609-cd4a09035507"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.930514 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7757e8f4-8b3e-4744-b609-cd4a09035507" (UID: "7757e8f4-8b3e-4744-b609-cd4a09035507"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.943874 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.960339 4787 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.960384 4787 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.960401 4787 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.960418 4787 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7757e8f4-8b3e-4744-b609-cd4a09035507-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.960437 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.960450 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bnl7\" (UniqueName: \"kubernetes.io/projected/7757e8f4-8b3e-4744-b609-cd4a09035507-kube-api-access-5bnl7\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.960480 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:53 crc kubenswrapper[4787]: I0129 13:39:53.997501 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ed1d89b-98e5-4d5b-9af8-1d22d1252a56" path="/var/lib/kubelet/pods/0ed1d89b-98e5-4d5b-9af8-1d22d1252a56/volumes" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.002991 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-config-data" (OuterVolumeSpecName: "config-data") pod "7757e8f4-8b3e-4744-b609-cd4a09035507" (UID: "7757e8f4-8b3e-4744-b609-cd4a09035507"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.062494 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7757e8f4-8b3e-4744-b609-cd4a09035507-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.727745 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.727736 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7757e8f4-8b3e-4744-b609-cd4a09035507","Type":"ContainerDied","Data":"4138dac0e46314455f5faaf83ab286ebafae7be5cafc903b7ed944a42e3403cf"} Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.728213 4787 scope.go:117] "RemoveContainer" containerID="568e4354bf384e8ed562bd66a577c33c88bd53d6a1fd7427082ceeb5ec9d5136" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.731776 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83de8885-fd33-40f4-a515-25b5161897ec","Type":"ContainerStarted","Data":"615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b"} Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.731843 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83de8885-fd33-40f4-a515-25b5161897ec","Type":"ContainerStarted","Data":"4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8"} Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.731869 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83de8885-fd33-40f4-a515-25b5161897ec","Type":"ContainerStarted","Data":"6b9a710ecb8e285848ab33437cfe8d7ea05166d59bc35b40b4958c39b6dfaaf5"} Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.751523 4787 scope.go:117] "RemoveContainer" containerID="cbb72651e64e9fde57d223f5ed76afc29d4cccc5534bd5d8d15f36a87f38e226" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.758007 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.7579884190000001 podStartE2EDuration="1.757988419s" podCreationTimestamp="2026-01-29 13:39:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:54.753678319 +0000 UTC m=+1433.514938655" watchObservedRunningTime="2026-01-29 13:39:54.757988419 +0000 UTC m=+1433.519248685" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.782020 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.791929 4787 scope.go:117] "RemoveContainer" containerID="00959bbca327a09c4e21f95b96207dfcd5a47c2a01194a5d8c4b7a6b52ad9060" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.802222 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.822805 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:54 crc kubenswrapper[4787]: E0129 13:39:54.824415 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="proxy-httpd" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.828522 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="proxy-httpd" Jan 29 13:39:54 crc kubenswrapper[4787]: E0129 13:39:54.828866 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="sg-core" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.829014 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="sg-core" Jan 29 
13:39:54 crc kubenswrapper[4787]: E0129 13:39:54.829205 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="ceilometer-notification-agent" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.829347 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="ceilometer-notification-agent" Jan 29 13:39:54 crc kubenswrapper[4787]: E0129 13:39:54.829529 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="ceilometer-central-agent" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.829682 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="ceilometer-central-agent" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.830282 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="proxy-httpd" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.830510 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="ceilometer-central-agent" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.830701 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="sg-core" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.830885 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" containerName="ceilometer-notification-agent" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.839564 4787 scope.go:117] "RemoveContainer" containerID="f4681dbb3360537e4ba3a633ca1cbad50ac4003cd13da34b3a44534bb7231957" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.848489 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.848606 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.851198 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.851352 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.851608 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.925428 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.945856 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.978730 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gngkq\" (UniqueName: \"kubernetes.io/projected/c27d0b15-3660-4d2c-b5f1-89392d93317f-kube-api-access-gngkq\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.979364 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-scripts\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.979442 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-log-httpd\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.979622 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-config-data\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.979733 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-run-httpd\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.980311 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:54 crc kubenswrapper[4787]: I0129 13:39:54.980385 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:54 crc 
kubenswrapper[4787]: I0129 13:39:54.980506 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.082497 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-config-data\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.082595 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-run-httpd\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.082624 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.082649 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.082684 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.082811 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gngkq\" (UniqueName: \"kubernetes.io/projected/c27d0b15-3660-4d2c-b5f1-89392d93317f-kube-api-access-gngkq\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.082869 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-scripts\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.082920 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-log-httpd\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.083514 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-log-httpd\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 
13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.085381 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-run-httpd\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.088185 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-config-data\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.088869 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.089091 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.090133 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-scripts\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.091818 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.123712 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gngkq\" (UniqueName: \"kubernetes.io/projected/c27d0b15-3660-4d2c-b5f1-89392d93317f-kube-api-access-gngkq\") pod \"ceilometer-0\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") " pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.165725 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.678654 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:39:55 crc kubenswrapper[4787]: W0129 13:39:55.681885 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc27d0b15_3660_4d2c_b5f1_89392d93317f.slice/crio-9f069aa04f88b7c5ee56bbc9eb45c7a4186e19669a71c1711a468517600b7000 WatchSource:0}: Error finding container 9f069aa04f88b7c5ee56bbc9eb45c7a4186e19669a71c1711a468517600b7000: Status 404 returned error can't find the container with id 9f069aa04f88b7c5ee56bbc9eb45c7a4186e19669a71c1711a468517600b7000 Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.746793 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerStarted","Data":"9f069aa04f88b7c5ee56bbc9eb45c7a4186e19669a71c1711a468517600b7000"} Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.770614 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:39:55 crc kubenswrapper[4787]: I0129 13:39:55.995887 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7757e8f4-8b3e-4744-b609-cd4a09035507" path="/var/lib/kubelet/pods/7757e8f4-8b3e-4744-b609-cd4a09035507/volumes" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.026938 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-64p9t"] Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.028077 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.029966 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.030767 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.041012 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-64p9t"] Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.104855 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.104917 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dglzr\" (UniqueName: \"kubernetes.io/projected/b2cd10ec-df06-4985-a309-d61bd27c0cb7-kube-api-access-dglzr\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.104967 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-config-data\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " 
pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.105016 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-scripts\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.206205 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.206282 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dglzr\" (UniqueName: \"kubernetes.io/projected/b2cd10ec-df06-4985-a309-d61bd27c0cb7-kube-api-access-dglzr\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.206333 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-config-data\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.206384 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-scripts\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.210083 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-config-data\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.210380 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.211108 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-scripts\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.224777 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dglzr\" (UniqueName: \"kubernetes.io/projected/b2cd10ec-df06-4985-a309-d61bd27c0cb7-kube-api-access-dglzr\") pod \"nova-cell1-cell-mapping-64p9t\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc 
kubenswrapper[4787]: I0129 13:39:56.350281 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.351608 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.425687 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-qk4t2"] Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.426413 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" podUID="617d1f09-2a16-4006-9f10-a71a24c67f98" containerName="dnsmasq-dns" containerID="cri-o://d6cd7686c6206a0fcda3a3bcfcb0c1008e467ae48af3659840efcbec80fefe41" gracePeriod=10 Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.782252 4787 generic.go:334] "Generic (PLEG): container finished" podID="617d1f09-2a16-4006-9f10-a71a24c67f98" containerID="d6cd7686c6206a0fcda3a3bcfcb0c1008e467ae48af3659840efcbec80fefe41" exitCode=0 Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.782561 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" event={"ID":"617d1f09-2a16-4006-9f10-a71a24c67f98","Type":"ContainerDied","Data":"d6cd7686c6206a0fcda3a3bcfcb0c1008e467ae48af3659840efcbec80fefe41"} Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.787081 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerStarted","Data":"2ed40eb4eb2d3cdf467d437f74046a37ba0764a96809f702ca6b5a682ad85043"} Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.914228 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-64p9t"] Jan 29 13:39:56 crc kubenswrapper[4787]: W0129 13:39:56.922287 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2cd10ec_df06_4985_a309_d61bd27c0cb7.slice/crio-d8301eccb37dbcd76f20e218946fd6e02ab466fdafab7e5bcca452bd10e4d045 WatchSource:0}: Error finding container d8301eccb37dbcd76f20e218946fd6e02ab466fdafab7e5bcca452bd10e4d045: Status 404 returned error can't find the container with id d8301eccb37dbcd76f20e218946fd6e02ab466fdafab7e5bcca452bd10e4d045 Jan 29 13:39:56 crc kubenswrapper[4787]: I0129 13:39:56.973776 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.020541 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-sb\") pod \"617d1f09-2a16-4006-9f10-a71a24c67f98\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.020615 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-nb\") pod \"617d1f09-2a16-4006-9f10-a71a24c67f98\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.020645 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6sh5\" (UniqueName: \"kubernetes.io/projected/617d1f09-2a16-4006-9f10-a71a24c67f98-kube-api-access-w6sh5\") pod \"617d1f09-2a16-4006-9f10-a71a24c67f98\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.020691 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-svc\") pod \"617d1f09-2a16-4006-9f10-a71a24c67f98\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.020759 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-config\") pod \"617d1f09-2a16-4006-9f10-a71a24c67f98\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.020881 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-swift-storage-0\") pod \"617d1f09-2a16-4006-9f10-a71a24c67f98\" (UID: \"617d1f09-2a16-4006-9f10-a71a24c67f98\") " Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.031388 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/617d1f09-2a16-4006-9f10-a71a24c67f98-kube-api-access-w6sh5" (OuterVolumeSpecName: "kube-api-access-w6sh5") pod "617d1f09-2a16-4006-9f10-a71a24c67f98" (UID: "617d1f09-2a16-4006-9f10-a71a24c67f98"). InnerVolumeSpecName "kube-api-access-w6sh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.091009 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "617d1f09-2a16-4006-9f10-a71a24c67f98" (UID: "617d1f09-2a16-4006-9f10-a71a24c67f98"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.100046 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "617d1f09-2a16-4006-9f10-a71a24c67f98" (UID: "617d1f09-2a16-4006-9f10-a71a24c67f98"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.101032 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-config" (OuterVolumeSpecName: "config") pod "617d1f09-2a16-4006-9f10-a71a24c67f98" (UID: "617d1f09-2a16-4006-9f10-a71a24c67f98"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.117734 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "617d1f09-2a16-4006-9f10-a71a24c67f98" (UID: "617d1f09-2a16-4006-9f10-a71a24c67f98"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.123281 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "617d1f09-2a16-4006-9f10-a71a24c67f98" (UID: "617d1f09-2a16-4006-9f10-a71a24c67f98"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.124555 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.124594 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.124606 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6sh5\" (UniqueName: \"kubernetes.io/projected/617d1f09-2a16-4006-9f10-a71a24c67f98-kube-api-access-w6sh5\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.124617 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.124629 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.124637 4787 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/617d1f09-2a16-4006-9f10-a71a24c67f98-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.795161 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" event={"ID":"617d1f09-2a16-4006-9f10-a71a24c67f98","Type":"ContainerDied","Data":"0c195c9d24eb7ba0f4ed68517e69ca99afb5d85c6f9e25432ad49fa2bbbb4dd3"} Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.795494 4787 scope.go:117] "RemoveContainer" containerID="d6cd7686c6206a0fcda3a3bcfcb0c1008e467ae48af3659840efcbec80fefe41" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.795608 4787 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bfb54f9b5-qk4t2" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.804677 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-64p9t" event={"ID":"b2cd10ec-df06-4985-a309-d61bd27c0cb7","Type":"ContainerStarted","Data":"29662081c93b7af728cfcfbd960a518ebc0b467e27dbe1770dd64d186b551e99"} Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.804732 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-64p9t" event={"ID":"b2cd10ec-df06-4985-a309-d61bd27c0cb7","Type":"ContainerStarted","Data":"d8301eccb37dbcd76f20e218946fd6e02ab466fdafab7e5bcca452bd10e4d045"} Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.828895 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-64p9t" podStartSLOduration=1.828875601 podStartE2EDuration="1.828875601s" podCreationTimestamp="2026-01-29 13:39:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:39:57.82348514 +0000 UTC m=+1436.584745406" watchObservedRunningTime="2026-01-29 13:39:57.828875601 +0000 UTC m=+1436.590135877" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.845930 4787 scope.go:117] "RemoveContainer" containerID="8b2100bbd0e7b597c6ba63187bc4b2902d0babe05931444cae6a5be05bb52020" Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.850131 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-qk4t2"] Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.860645 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bfb54f9b5-qk4t2"] Jan 29 13:39:57 crc kubenswrapper[4787]: I0129 13:39:57.999574 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="617d1f09-2a16-4006-9f10-a71a24c67f98" path="/var/lib/kubelet/pods/617d1f09-2a16-4006-9f10-a71a24c67f98/volumes" Jan 29 13:39:58 crc kubenswrapper[4787]: I0129 13:39:58.394168 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:39:58 crc kubenswrapper[4787]: I0129 13:39:58.394531 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:39:58 crc kubenswrapper[4787]: I0129 13:39:58.814476 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerStarted","Data":"ba639ee3ff7377d69eaf774b671ae0c100eaa7448c3c876737ed5dc86aecd94f"} Jan 29 13:39:59 crc kubenswrapper[4787]: I0129 13:39:59.837747 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerStarted","Data":"e36b1605a3b04ed9fbf874c7a5383659083ef860921f7d81eb3be81f257cfe63"} Jan 29 13:40:01 crc kubenswrapper[4787]: I0129 13:40:01.869057 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerStarted","Data":"a8692568c18184c00d2da8446d64f8b935631ac2dbe63cd7cc1211e1a04eae5d"} Jan 29 13:40:01 crc kubenswrapper[4787]: I0129 13:40:01.871059 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 29 13:40:01 crc kubenswrapper[4787]: I0129 13:40:01.872383 4787 generic.go:334] "Generic (PLEG): container finished" podID="b2cd10ec-df06-4985-a309-d61bd27c0cb7" containerID="29662081c93b7af728cfcfbd960a518ebc0b467e27dbe1770dd64d186b551e99" exitCode=0 Jan 29 13:40:01 crc kubenswrapper[4787]: I0129 13:40:01.872416 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-64p9t" event={"ID":"b2cd10ec-df06-4985-a309-d61bd27c0cb7","Type":"ContainerDied","Data":"29662081c93b7af728cfcfbd960a518ebc0b467e27dbe1770dd64d186b551e99"} Jan 29 13:40:01 crc kubenswrapper[4787]: I0129 13:40:01.905285 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.970183798 podStartE2EDuration="7.905268886s" podCreationTimestamp="2026-01-29 13:39:54 +0000 UTC" firstStartedPulling="2026-01-29 13:39:55.684443636 +0000 UTC m=+1434.445703922" lastFinishedPulling="2026-01-29 13:40:00.619528734 +0000 UTC m=+1439.380789010" observedRunningTime="2026-01-29 13:40:01.895339839 +0000 UTC m=+1440.656600145" watchObservedRunningTime="2026-01-29 13:40:01.905268886 +0000 UTC m=+1440.666529162" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.360211 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.386903 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.386949 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.457891 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-scripts\") pod \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.458075 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-combined-ca-bundle\") pod \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.458256 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-config-data\") pod \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.458291 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dglzr\" (UniqueName: \"kubernetes.io/projected/b2cd10ec-df06-4985-a309-d61bd27c0cb7-kube-api-access-dglzr\") pod \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\" (UID: \"b2cd10ec-df06-4985-a309-d61bd27c0cb7\") " Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.463549 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/projected/b2cd10ec-df06-4985-a309-d61bd27c0cb7-kube-api-access-dglzr" (OuterVolumeSpecName: "kube-api-access-dglzr") pod "b2cd10ec-df06-4985-a309-d61bd27c0cb7" (UID: "b2cd10ec-df06-4985-a309-d61bd27c0cb7"). InnerVolumeSpecName "kube-api-access-dglzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.463973 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-scripts" (OuterVolumeSpecName: "scripts") pod "b2cd10ec-df06-4985-a309-d61bd27c0cb7" (UID: "b2cd10ec-df06-4985-a309-d61bd27c0cb7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.482832 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2cd10ec-df06-4985-a309-d61bd27c0cb7" (UID: "b2cd10ec-df06-4985-a309-d61bd27c0cb7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.484740 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-config-data" (OuterVolumeSpecName: "config-data") pod "b2cd10ec-df06-4985-a309-d61bd27c0cb7" (UID: "b2cd10ec-df06-4985-a309-d61bd27c0cb7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.560625 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.560658 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dglzr\" (UniqueName: \"kubernetes.io/projected/b2cd10ec-df06-4985-a309-d61bd27c0cb7-kube-api-access-dglzr\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.560672 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.560681 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2cd10ec-df06-4985-a309-d61bd27c0cb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.912354 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-64p9t" Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.912398 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-64p9t" event={"ID":"b2cd10ec-df06-4985-a309-d61bd27c0cb7","Type":"ContainerDied","Data":"d8301eccb37dbcd76f20e218946fd6e02ab466fdafab7e5bcca452bd10e4d045"} Jan 29 13:40:03 crc kubenswrapper[4787]: I0129 13:40:03.912901 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8301eccb37dbcd76f20e218946fd6e02ab466fdafab7e5bcca452bd10e4d045" Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.110228 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.110537 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-log" containerID="cri-o://4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8" gracePeriod=30 Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.111017 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-api" containerID="cri-o://615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b" gracePeriod=30 Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.127826 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": EOF" Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.127826 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": EOF" Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.133591 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.133851 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" containerName="nova-scheduler-scheduler" containerID="cri-o://b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99" gracePeriod=30 Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.154017 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.156227 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-log" containerID="cri-o://a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945" gracePeriod=30 Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.156481 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-metadata" containerID="cri-o://117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9" gracePeriod=30 Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.925630 4787 generic.go:334] "Generic (PLEG): container finished" podID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" 
containerID="a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945" exitCode=143 Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.925691 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9addcb14-d878-4c6e-bac1-be3e5393fb8e","Type":"ContainerDied","Data":"a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945"} Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.928879 4787 generic.go:334] "Generic (PLEG): container finished" podID="83de8885-fd33-40f4-a515-25b5161897ec" containerID="4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8" exitCode=143 Jan 29 13:40:04 crc kubenswrapper[4787]: I0129 13:40:04.928950 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83de8885-fd33-40f4-a515-25b5161897ec","Type":"ContainerDied","Data":"4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8"} Jan 29 13:40:05 crc kubenswrapper[4787]: E0129 13:40:05.700931 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:40:05 crc kubenswrapper[4787]: E0129 13:40:05.703437 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:40:05 crc kubenswrapper[4787]: E0129 13:40:05.705862 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:40:05 crc kubenswrapper[4787]: E0129 13:40:05.705945 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" containerName="nova-scheduler-scheduler" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.281250 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": read tcp 10.217.0.2:37170->10.217.0.192:8775: read: connection reset by peer" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.281264 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": read tcp 10.217.0.2:37176->10.217.0.192:8775: read: connection reset by peer" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.731269 4787 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.731269 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.837941 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vtl2\" (UniqueName: \"kubernetes.io/projected/9addcb14-d878-4c6e-bac1-be3e5393fb8e-kube-api-access-5vtl2\") pod \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") "
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.838119 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-config-data\") pod \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") "
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.838181 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-nova-metadata-tls-certs\") pod \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") "
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.838266 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9addcb14-d878-4c6e-bac1-be3e5393fb8e-logs\") pod \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") "
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.838311 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-combined-ca-bundle\") pod \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\" (UID: \"9addcb14-d878-4c6e-bac1-be3e5393fb8e\") "
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.839110 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9addcb14-d878-4c6e-bac1-be3e5393fb8e-logs" (OuterVolumeSpecName: "logs") pod "9addcb14-d878-4c6e-bac1-be3e5393fb8e" (UID: "9addcb14-d878-4c6e-bac1-be3e5393fb8e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.844162 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9addcb14-d878-4c6e-bac1-be3e5393fb8e-kube-api-access-5vtl2" (OuterVolumeSpecName: "kube-api-access-5vtl2") pod "9addcb14-d878-4c6e-bac1-be3e5393fb8e" (UID: "9addcb14-d878-4c6e-bac1-be3e5393fb8e"). InnerVolumeSpecName "kube-api-access-5vtl2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.872608 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-config-data" (OuterVolumeSpecName: "config-data") pod "9addcb14-d878-4c6e-bac1-be3e5393fb8e" (UID: "9addcb14-d878-4c6e-bac1-be3e5393fb8e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.873937 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9addcb14-d878-4c6e-bac1-be3e5393fb8e" (UID: "9addcb14-d878-4c6e-bac1-be3e5393fb8e"). InnerVolumeSpecName "combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.891896 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "9addcb14-d878-4c6e-bac1-be3e5393fb8e" (UID: "9addcb14-d878-4c6e-bac1-be3e5393fb8e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.940353 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9addcb14-d878-4c6e-bac1-be3e5393fb8e-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.940396 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.940410 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vtl2\" (UniqueName: \"kubernetes.io/projected/9addcb14-d878-4c6e-bac1-be3e5393fb8e-kube-api-access-5vtl2\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.940420 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.940433 4787 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9addcb14-d878-4c6e-bac1-be3e5393fb8e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.957765 4787 generic.go:334] "Generic (PLEG): container finished" podID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerID="117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9" exitCode=0 Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.957808 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9addcb14-d878-4c6e-bac1-be3e5393fb8e","Type":"ContainerDied","Data":"117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9"} Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.957839 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9addcb14-d878-4c6e-bac1-be3e5393fb8e","Type":"ContainerDied","Data":"8285dfc944598c5811aaa76bdbd24ef65fcab5e3d83912b3aa909a3ea9dbc16e"} Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.957835 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.957855 4787 scope.go:117] "RemoveContainer" containerID="117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9" Jan 29 13:40:07 crc kubenswrapper[4787]: I0129 13:40:07.981431 4787 scope.go:117] "RemoveContainer" containerID="a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.000908 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.006112 4787 scope.go:117] "RemoveContainer" containerID="117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9" Jan 29 13:40:08 crc kubenswrapper[4787]: E0129 13:40:08.006582 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9\": container with ID starting with 117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9 not found: ID does not exist" containerID="117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.006611 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9"} err="failed to get container status \"117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9\": rpc error: code = NotFound desc = could not find container \"117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9\": container with ID starting with 117799e7c4a856d2132646695ae4824800642735c475453d6ca3736be620c1e9 not found: ID does not exist" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.006639 4787 scope.go:117] "RemoveContainer" containerID="a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945" Jan 29 13:40:08 crc kubenswrapper[4787]: E0129 13:40:08.006958 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945\": container with ID starting with a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945 not found: ID does not exist" containerID="a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.006980 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945"} err="failed to get container status \"a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945\": rpc error: code = NotFound desc = could not find container \"a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945\": container with ID starting with a56da6d2432d969f8120878ceaf323a280bd11bb2a12443a69d785187dfb8945 not found: ID does not exist" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.007667 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018063 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:40:08 crc kubenswrapper[4787]: E0129 13:40:08.018487 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="617d1f09-2a16-4006-9f10-a71a24c67f98" containerName="init" Jan 29 13:40:08 crc 
kubenswrapper[4787]: I0129 13:40:08.018505 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="617d1f09-2a16-4006-9f10-a71a24c67f98" containerName="init" Jan 29 13:40:08 crc kubenswrapper[4787]: E0129 13:40:08.018527 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-log" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018535 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-log" Jan 29 13:40:08 crc kubenswrapper[4787]: E0129 13:40:08.018547 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="617d1f09-2a16-4006-9f10-a71a24c67f98" containerName="dnsmasq-dns" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018555 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="617d1f09-2a16-4006-9f10-a71a24c67f98" containerName="dnsmasq-dns" Jan 29 13:40:08 crc kubenswrapper[4787]: E0129 13:40:08.018568 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2cd10ec-df06-4985-a309-d61bd27c0cb7" containerName="nova-manage" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018573 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2cd10ec-df06-4985-a309-d61bd27c0cb7" containerName="nova-manage" Jan 29 13:40:08 crc kubenswrapper[4787]: E0129 13:40:08.018584 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-metadata" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018590 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-metadata" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018798 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-log" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018812 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="617d1f09-2a16-4006-9f10-a71a24c67f98" containerName="dnsmasq-dns" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018827 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2cd10ec-df06-4985-a309-d61bd27c0cb7" containerName="nova-manage" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.018839 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" containerName="nova-metadata-metadata" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.019810 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.024017 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.024270 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.029370 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.145487 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkmr2\" (UniqueName: \"kubernetes.io/projected/93f58b7a-13c3-49ef-8c78-a5931438cba6-kube-api-access-mkmr2\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.145566 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.145587 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f58b7a-13c3-49ef-8c78-a5931438cba6-logs\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.145624 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-config-data\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.145704 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.247266 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.247434 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkmr2\" (UniqueName: \"kubernetes.io/projected/93f58b7a-13c3-49ef-8c78-a5931438cba6-kube-api-access-mkmr2\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.247518 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: 
\"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.247537 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f58b7a-13c3-49ef-8c78-a5931438cba6-logs\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.247967 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-config-data\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.248154 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f58b7a-13c3-49ef-8c78-a5931438cba6-logs\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.250679 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.251167 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.251535 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-config-data\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.261236 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkmr2\" (UniqueName: \"kubernetes.io/projected/93f58b7a-13c3-49ef-8c78-a5931438cba6-kube-api-access-mkmr2\") pod \"nova-metadata-0\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.341153 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.784262 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.971719 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93f58b7a-13c3-49ef-8c78-a5931438cba6","Type":"ContainerStarted","Data":"221715efc1d70075a201ff1b336d7ca967d74dcf9c3ba5c93e0689478777ecd0"} Jan 29 13:40:08 crc kubenswrapper[4787]: I0129 13:40:08.971754 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93f58b7a-13c3-49ef-8c78-a5931438cba6","Type":"ContainerStarted","Data":"9eff9cb2779626095771fdd89440fbb4a53a4792e1ce293dba711c4105ee90c3"} Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.697817 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.880719 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-config-data\") pod \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.880790 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56rz7\" (UniqueName: \"kubernetes.io/projected/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-kube-api-access-56rz7\") pod \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.880851 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-combined-ca-bundle\") pod \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\" (UID: \"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7\") " Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.885895 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-kube-api-access-56rz7" (OuterVolumeSpecName: "kube-api-access-56rz7") pod "6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" (UID: "6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7"). InnerVolumeSpecName "kube-api-access-56rz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.911957 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" (UID: "6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.919448 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-config-data" (OuterVolumeSpecName: "config-data") pod "6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" (UID: "6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.979757 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0"
Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.986347 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.986407 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56rz7\" (UniqueName: \"kubernetes.io/projected/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-kube-api-access-56rz7\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:09 crc kubenswrapper[4787]: I0129 13:40:09.986439 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.005295 4787 generic.go:334] "Generic (PLEG): container finished" podID="6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" containerID="b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99" exitCode=0
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.005489 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.006607 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9addcb14-d878-4c6e-bac1-be3e5393fb8e" path="/var/lib/kubelet/pods/9addcb14-d878-4c6e-bac1-be3e5393fb8e/volumes"
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.013223 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7","Type":"ContainerDied","Data":"b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99"}
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.013285 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7","Type":"ContainerDied","Data":"1041c45faac67130e2b5ec92ac5524d36b36e1006a5001dab346515d01256875"}
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.013309 4787 scope.go:117] "RemoveContainer" containerID="b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99"
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.014097 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93f58b7a-13c3-49ef-8c78-a5931438cba6","Type":"ContainerStarted","Data":"25425c352e980ca78c9d13c8057eecdcf5a099ad60b14349a5a4e3ffcfaaeba4"}
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.028699 4787 generic.go:334] "Generic (PLEG): container finished" podID="83de8885-fd33-40f4-a515-25b5161897ec" containerID="615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b" exitCode=0
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.028742 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83de8885-fd33-40f4-a515-25b5161897ec","Type":"ContainerDied","Data":"615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b"}
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.028765 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"83de8885-fd33-40f4-a515-25b5161897ec","Type":"ContainerDied","Data":"6b9a710ecb8e285848ab33437cfe8d7ea05166d59bc35b40b4958c39b6dfaaf5"}
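The exit codes in these ContainerDied events line up with the graceful deletion started at 13:40:04 with gracePeriod=30: the log-tailing containers (nova-metadata-log, nova-api-log) ended with exitCode=143, the conventional 128+signal encoding for a process killed by SIGTERM, while nova-metadata-metadata, nova-scheduler-scheduler and nova-api-api shut themselves down cleanly (exitCode=0) inside the grace period. The arithmetic, for reference:

```go
// Exit-code convention only; illustrative, not taken from kubelet source.
package main

import "fmt"

func main() {
	const sigterm = 15 // sent first when a pod is deleted
	const sigkill = 9  // sent if the grace period (30s here) expires
	fmt.Println(128 + sigterm) // 143, as seen for the -log containers above
	fmt.Println(128 + sigkill) // 137, what a forced kill would have produced
}
```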
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.050651 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.050624419 podStartE2EDuration="3.050624419s" podCreationTimestamp="2026-01-29 13:40:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:40:10.047601615 +0000 UTC m=+1448.808861901" watchObservedRunningTime="2026-01-29 13:40:10.050624419 +0000 UTC m=+1448.811884715" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.060406 4787 scope.go:117] "RemoveContainer" containerID="b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99" Jan 29 13:40:10 crc kubenswrapper[4787]: E0129 13:40:10.064663 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99\": container with ID starting with b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99 not found: ID does not exist" containerID="b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.064707 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99"} err="failed to get container status \"b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99\": rpc error: code = NotFound desc = could not find container \"b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99\": container with ID starting with b4363587c2713de0a4114d529e23fb614e07efe5101d3733c85a8fb75ce75d99 not found: ID does not exist" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.064731 4787 scope.go:117] "RemoveContainer" containerID="615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.087593 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83de8885-fd33-40f4-a515-25b5161897ec-logs\") pod \"83de8885-fd33-40f4-a515-25b5161897ec\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.087663 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8z2x\" (UniqueName: \"kubernetes.io/projected/83de8885-fd33-40f4-a515-25b5161897ec-kube-api-access-b8z2x\") pod \"83de8885-fd33-40f4-a515-25b5161897ec\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.087772 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-internal-tls-certs\") pod \"83de8885-fd33-40f4-a515-25b5161897ec\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.087879 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-config-data\") pod \"83de8885-fd33-40f4-a515-25b5161897ec\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") " Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.087944 4787 
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.087944 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-combined-ca-bundle\") pod \"83de8885-fd33-40f4-a515-25b5161897ec\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") "
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.087976 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-public-tls-certs\") pod \"83de8885-fd33-40f4-a515-25b5161897ec\" (UID: \"83de8885-fd33-40f4-a515-25b5161897ec\") "
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.091736 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83de8885-fd33-40f4-a515-25b5161897ec-logs" (OuterVolumeSpecName: "logs") pod "83de8885-fd33-40f4-a515-25b5161897ec" (UID: "83de8885-fd33-40f4-a515-25b5161897ec"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.095495 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83de8885-fd33-40f4-a515-25b5161897ec-kube-api-access-b8z2x" (OuterVolumeSpecName: "kube-api-access-b8z2x") pod "83de8885-fd33-40f4-a515-25b5161897ec" (UID: "83de8885-fd33-40f4-a515-25b5161897ec"). InnerVolumeSpecName "kube-api-access-b8z2x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.098305 4787 scope.go:117] "RemoveContainer" containerID="4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8"
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.098320 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.130869 4787 scope.go:117] "RemoveContainer" containerID="615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b"
Jan 29 13:40:10 crc kubenswrapper[4787]: E0129 13:40:10.132607 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b\": container with ID starting with 615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b not found: ID does not exist" containerID="615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b"
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.132646 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b"} err="failed to get container status \"615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b\": rpc error: code = NotFound desc = could not find container \"615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b\": container with ID starting with 615ac9ff162642d51a16e059d8878e0d14718ee6955a1ba99e3733a21323897b not found: ID does not exist"
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.132667 4787 scope.go:117] "RemoveContainer" containerID="4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8"
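The "could not find container ... ID does not exist" errors around this point look alarming but appear to be a benign race: two cleanup paths each ask the runtime to remove the same already-dead container, and whichever runs second gets NotFound back, which is logged and then dropped. A sketch of that idempotent-removal pattern, under assumed names (fakeRuntime and removeContainer are hypothetical illustrations, not the kubelet or CRI API):

```go
// Treating "already gone" as success when deleting a container.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("ID does not exist")

type fakeRuntime struct{ alive map[string]bool }

func (r *fakeRuntime) remove(id string) error {
	if !r.alive[id] {
		return fmt.Errorf("could not find container %q: %w", id, errNotFound)
	}
	delete(r.alive, id)
	return nil
}

// removeContainer swallows NotFound, so a racing second delete is harmless.
func removeContainer(r *fakeRuntime, id string) error {
	if err := r.remove(id); err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	return nil
}

func main() {
	r := &fakeRuntime{alive: map[string]bool{"4f3887c1851b": true}}
	fmt.Println(removeContainer(r, "4f3887c1851b")) // <nil>: removed
	fmt.Println(removeContainer(r, "4f3887c1851b")) // <nil>: NotFound tolerated
}
```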
Jan 29 13:40:10 crc kubenswrapper[4787]: E0129 13:40:10.134610 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8\": container with ID starting with 4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8 not found: ID does not exist" containerID="4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8"
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.134657 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8"} err="failed to get container status \"4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8\": rpc error: code = NotFound desc = could not find container \"4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8\": container with ID starting with 4f3887c1851b81e9a63425c97a6104a8e37ba91c99e471963c8dcf5b2d3f45c8 not found: ID does not exist"
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.136665 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83de8885-fd33-40f4-a515-25b5161897ec" (UID: "83de8885-fd33-40f4-a515-25b5161897ec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.136739 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-config-data" (OuterVolumeSpecName: "config-data") pod "83de8885-fd33-40f4-a515-25b5161897ec" (UID: "83de8885-fd33-40f4-a515-25b5161897ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.136945 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.138024 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "83de8885-fd33-40f4-a515-25b5161897ec" (UID: "83de8885-fd33-40f4-a515-25b5161897ec"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.140565 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "83de8885-fd33-40f4-a515-25b5161897ec" (UID: "83de8885-fd33-40f4-a515-25b5161897ec"). InnerVolumeSpecName "public-tls-certs".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.145142 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:40:10 crc kubenswrapper[4787]: E0129 13:40:10.145787 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-api" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.145817 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-api" Jan 29 13:40:10 crc kubenswrapper[4787]: E0129 13:40:10.145839 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-log" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.145850 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-log" Jan 29 13:40:10 crc kubenswrapper[4787]: E0129 13:40:10.145880 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" containerName="nova-scheduler-scheduler" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.145890 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" containerName="nova-scheduler-scheduler" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.146518 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-log" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.146558 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="83de8885-fd33-40f4-a515-25b5161897ec" containerName="nova-api-api" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.146572 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" containerName="nova-scheduler-scheduler" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.148068 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.151268 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.153729 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.190221 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.190539 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.190626 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.190690 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83de8885-fd33-40f4-a515-25b5161897ec-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.190746 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8z2x\" (UniqueName: \"kubernetes.io/projected/83de8885-fd33-40f4-a515-25b5161897ec-kube-api-access-b8z2x\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.190807 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83de8885-fd33-40f4-a515-25b5161897ec-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.293037 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.293160 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzq7d\" (UniqueName: \"kubernetes.io/projected/56183615-9f6d-4fc8-8ff9-4856929e5d28-kube-api-access-nzq7d\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.293193 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-config-data\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.380596 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.392732 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.395234 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.395439 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzq7d\" (UniqueName: \"kubernetes.io/projected/56183615-9f6d-4fc8-8ff9-4856929e5d28-kube-api-access-nzq7d\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.395562 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-config-data\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.400740 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.401001 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-config-data\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.404490 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.406041 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.411987 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.412292 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.412424 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.415320 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.422037 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzq7d\" (UniqueName: \"kubernetes.io/projected/56183615-9f6d-4fc8-8ff9-4856929e5d28-kube-api-access-nzq7d\") pod \"nova-scheduler-0\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.466024 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.496711 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-config-data\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.496778 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-public-tls-certs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.496923 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9zwk\" (UniqueName: \"kubernetes.io/projected/b9df2172-145d-4edd-8d1c-7cc6768840bb-kube-api-access-k9zwk\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.497038 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9df2172-145d-4edd-8d1c-7cc6768840bb-logs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.497109 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.497156 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.599044 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.599330 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.599398 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-config-data\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.599441 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-public-tls-certs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.599496 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9zwk\" (UniqueName: \"kubernetes.io/projected/b9df2172-145d-4edd-8d1c-7cc6768840bb-kube-api-access-k9zwk\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.599542 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9df2172-145d-4edd-8d1c-7cc6768840bb-logs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.599979 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9df2172-145d-4edd-8d1c-7cc6768840bb-logs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.603500 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.603872 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-public-tls-certs\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.606095 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-config-data\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.609356 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.617256 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9zwk\" (UniqueName: \"kubernetes.io/projected/b9df2172-145d-4edd-8d1c-7cc6768840bb-kube-api-access-k9zwk\") pod \"nova-api-0\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") " pod="openstack/nova-api-0" Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.870332 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0"
Jan 29 13:40:10 crc kubenswrapper[4787]: W0129 13:40:10.911934 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56183615_9f6d_4fc8_8ff9_4856929e5d28.slice/crio-cbd8bc309ad18fff4e2798563b3d55fb3581b8ee8057d7c0c99c30f7659d1e3d WatchSource:0}: Error finding container cbd8bc309ad18fff4e2798563b3d55fb3581b8ee8057d7c0c99c30f7659d1e3d: Status 404 returned error can't find the container with id cbd8bc309ad18fff4e2798563b3d55fb3581b8ee8057d7c0c99c30f7659d1e3d
Jan 29 13:40:10 crc kubenswrapper[4787]: I0129 13:40:10.915142 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 13:40:11 crc kubenswrapper[4787]: I0129 13:40:11.049936 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"56183615-9f6d-4fc8-8ff9-4856929e5d28","Type":"ContainerStarted","Data":"cbd8bc309ad18fff4e2798563b3d55fb3581b8ee8057d7c0c99c30f7659d1e3d"}
Jan 29 13:40:11 crc kubenswrapper[4787]: W0129 13:40:11.300494 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9df2172_145d_4edd_8d1c_7cc6768840bb.slice/crio-a3663a69244dbbe2eff33d12bc6d3fd8afdfbea1552c58cf5b4e7d12553ee84e WatchSource:0}: Error finding container a3663a69244dbbe2eff33d12bc6d3fd8afdfbea1552c58cf5b4e7d12553ee84e: Status 404 returned error can't find the container with id a3663a69244dbbe2eff33d12bc6d3fd8afdfbea1552c58cf5b4e7d12553ee84e
Jan 29 13:40:11 crc kubenswrapper[4787]: I0129 13:40:11.303411 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 29 13:40:11 crc kubenswrapper[4787]: I0129 13:40:11.995921 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7" path="/var/lib/kubelet/pods/6c0308f2-b0a3-48aa-a2fc-6dbb21ef79e7/volumes"
Jan 29 13:40:11 crc kubenswrapper[4787]: I0129 13:40:11.997131 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83de8885-fd33-40f4-a515-25b5161897ec" path="/var/lib/kubelet/pods/83de8885-fd33-40f4-a515-25b5161897ec/volumes"
Jan 29 13:40:12 crc kubenswrapper[4787]: I0129 13:40:12.071925 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"56183615-9f6d-4fc8-8ff9-4856929e5d28","Type":"ContainerStarted","Data":"29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518"}
Jan 29 13:40:12 crc kubenswrapper[4787]: I0129 13:40:12.073354 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b9df2172-145d-4edd-8d1c-7cc6768840bb","Type":"ContainerStarted","Data":"083f46373caf16e71650bd1e4ebee2fe1d02f7cb3f599bbb6f51f4683a6a4fa6"}
Jan 29 13:40:12 crc kubenswrapper[4787]: I0129 13:40:12.073378 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b9df2172-145d-4edd-8d1c-7cc6768840bb","Type":"ContainerStarted","Data":"8c54aa6658f396298b1bba542f62e319b2dd09d1a2963008ba09c2366a51988b"}
Jan 29 13:40:12 crc kubenswrapper[4787]: I0129 13:40:12.073390 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b9df2172-145d-4edd-8d1c-7cc6768840bb","Type":"ContainerStarted","Data":"a3663a69244dbbe2eff33d12bc6d3fd8afdfbea1552c58cf5b4e7d12553ee84e"}
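The "Observed pod startup duration" entries that follow can be checked by hand: when no image was pulled (firstStartedPulling and lastFinishedPulling are the zero time), the reported podStartSLOduration in these entries works out to watchObservedRunningTime minus podCreationTimestamp, e.g. 2.096342287s for nova-scheduler-0 below and 3.050624419s for nova-metadata-0 earlier. A small verification, using the scheduler's timestamps from the log:

```go
// Recomputing the scheduler's reported startup duration; the timestamps are
// copied from the log entries below, the subtraction is the only logic.
package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339, "2026-01-29T13:40:10Z")
	running, _ := time.Parse(time.RFC3339Nano, "2026-01-29T13:40:12.096342287Z")
	fmt.Println(running.Sub(created)) // 2.096342287s, matching podStartSLOduration
}
```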
duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.096342287 podStartE2EDuration="2.096342287s" podCreationTimestamp="2026-01-29 13:40:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:40:12.088398525 +0000 UTC m=+1450.849658801" watchObservedRunningTime="2026-01-29 13:40:12.096342287 +0000 UTC m=+1450.857602563" Jan 29 13:40:12 crc kubenswrapper[4787]: I0129 13:40:12.118023 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.118004162 podStartE2EDuration="2.118004162s" podCreationTimestamp="2026-01-29 13:40:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-29 13:40:12.107406576 +0000 UTC m=+1450.868666922" watchObservedRunningTime="2026-01-29 13:40:12.118004162 +0000 UTC m=+1450.879264438" Jan 29 13:40:13 crc kubenswrapper[4787]: I0129 13:40:13.341927 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 13:40:13 crc kubenswrapper[4787]: I0129 13:40:13.342562 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 29 13:40:15 crc kubenswrapper[4787]: I0129 13:40:15.466736 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 29 13:40:18 crc kubenswrapper[4787]: I0129 13:40:18.342394 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 13:40:18 crc kubenswrapper[4787]: I0129 13:40:18.343344 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 29 13:40:19 crc kubenswrapper[4787]: I0129 13:40:19.358659 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 13:40:19 crc kubenswrapper[4787]: I0129 13:40:19.359041 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 13:40:20 crc kubenswrapper[4787]: I0129 13:40:20.467054 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 29 13:40:20 crc kubenswrapper[4787]: I0129 13:40:20.494864 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 29 13:40:20 crc kubenswrapper[4787]: I0129 13:40:20.870565 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:40:20 crc kubenswrapper[4787]: I0129 13:40:20.870624 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 29 13:40:21 crc kubenswrapper[4787]: I0129 13:40:21.191355 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 29 13:40:21 crc kubenswrapper[4787]: I0129 13:40:21.881621 4787 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-api-0" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 13:40:21 crc kubenswrapper[4787]: I0129 13:40:21.881675 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 29 13:40:25 crc kubenswrapper[4787]: I0129 13:40:25.181584 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 29 13:40:27 crc kubenswrapper[4787]: I0129 13:40:27.807738 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vsd56"] Jan 29 13:40:27 crc kubenswrapper[4787]: I0129 13:40:27.810810 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:27 crc kubenswrapper[4787]: I0129 13:40:27.822192 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vsd56"] Jan 29 13:40:27 crc kubenswrapper[4787]: I0129 13:40:27.973641 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krdfs\" (UniqueName: \"kubernetes.io/projected/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-kube-api-access-krdfs\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:27 crc kubenswrapper[4787]: I0129 13:40:27.973709 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-catalog-content\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:27 crc kubenswrapper[4787]: I0129 13:40:27.973824 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-utilities\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.075648 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krdfs\" (UniqueName: \"kubernetes.io/projected/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-kube-api-access-krdfs\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.075722 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-catalog-content\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.075796 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-utilities\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.076308 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-catalog-content\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.076318 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-utilities\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.104213 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krdfs\" (UniqueName: \"kubernetes.io/projected/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-kube-api-access-krdfs\") pod \"redhat-operators-vsd56\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") " pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.169025 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.346701 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.348857 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.358257 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.394337 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.394401 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:40:28 crc kubenswrapper[4787]: I0129 13:40:28.695183 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vsd56"] Jan 29 13:40:28 crc kubenswrapper[4787]: W0129 13:40:28.713942 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2ab58f8_5b38_4eff_b2a9_f1c4a1090f8e.slice/crio-44f0baaef01c2f0c5a8fbc73869c04c7d525ebbbed92712b77c4d34a4231a22e WatchSource:0}: Error finding container 44f0baaef01c2f0c5a8fbc73869c04c7d525ebbbed92712b77c4d34a4231a22e: Status 404 returned error can't find the container with id 44f0baaef01c2f0c5a8fbc73869c04c7d525ebbbed92712b77c4d34a4231a22e Jan 29 13:40:29 crc kubenswrapper[4787]: 
I0129 13:40:29.226044 4787 generic.go:334] "Generic (PLEG): container finished" podID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerID="3f33d418440342619e6bc5d94a2117adf6e03acd2493d21c569f1dcbd130ec68" exitCode=0 Jan 29 13:40:29 crc kubenswrapper[4787]: I0129 13:40:29.227334 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vsd56" event={"ID":"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e","Type":"ContainerDied","Data":"3f33d418440342619e6bc5d94a2117adf6e03acd2493d21c569f1dcbd130ec68"} Jan 29 13:40:29 crc kubenswrapper[4787]: I0129 13:40:29.227367 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vsd56" event={"ID":"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e","Type":"ContainerStarted","Data":"44f0baaef01c2f0c5a8fbc73869c04c7d525ebbbed92712b77c4d34a4231a22e"} Jan 29 13:40:29 crc kubenswrapper[4787]: I0129 13:40:29.237440 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 29 13:40:30 crc kubenswrapper[4787]: I0129 13:40:30.877357 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 13:40:30 crc kubenswrapper[4787]: I0129 13:40:30.878403 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 13:40:30 crc kubenswrapper[4787]: I0129 13:40:30.878765 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 29 13:40:30 crc kubenswrapper[4787]: I0129 13:40:30.883694 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 13:40:31 crc kubenswrapper[4787]: I0129 13:40:31.242726 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 29 13:40:31 crc kubenswrapper[4787]: I0129 13:40:31.249537 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 29 13:40:32 crc kubenswrapper[4787]: I0129 13:40:32.254373 4787 generic.go:334] "Generic (PLEG): container finished" podID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerID="95b37a3998bf6f4c7bce4baa18b023a064fe28aa40f952a1e202e9e856e7dcd3" exitCode=0 Jan 29 13:40:32 crc kubenswrapper[4787]: I0129 13:40:32.254619 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vsd56" event={"ID":"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e","Type":"ContainerDied","Data":"95b37a3998bf6f4c7bce4baa18b023a064fe28aa40f952a1e202e9e856e7dcd3"} Jan 29 13:40:33 crc kubenswrapper[4787]: I0129 13:40:33.266167 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vsd56" event={"ID":"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e","Type":"ContainerStarted","Data":"8b89dd665aed0c4309515ac3a06b12b69b96df8267df5cec76c04c0f28802c98"} Jan 29 13:40:33 crc kubenswrapper[4787]: I0129 13:40:33.288363 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vsd56" podStartSLOduration=2.834374018 podStartE2EDuration="6.288341909s" podCreationTimestamp="2026-01-29 13:40:27 +0000 UTC" firstStartedPulling="2026-01-29 13:40:29.228445124 +0000 UTC m=+1467.989705400" lastFinishedPulling="2026-01-29 13:40:32.682413015 +0000 UTC m=+1471.443673291" observedRunningTime="2026-01-29 13:40:33.285932722 +0000 UTC m=+1472.047193018" watchObservedRunningTime="2026-01-29 13:40:33.288341909 +0000 UTC m=+1472.049602195" Jan 29 13:40:38 crc 
kubenswrapper[4787]: I0129 13:40:38.170524 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:38 crc kubenswrapper[4787]: I0129 13:40:38.171128 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:39 crc kubenswrapper[4787]: I0129 13:40:39.239820 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vsd56" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="registry-server" probeResult="failure" output=< Jan 29 13:40:39 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s Jan 29 13:40:39 crc kubenswrapper[4787]: > Jan 29 13:40:48 crc kubenswrapper[4787]: I0129 13:40:48.227024 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:48 crc kubenswrapper[4787]: I0129 13:40:48.286311 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vsd56" Jan 29 13:40:48 crc kubenswrapper[4787]: I0129 13:40:48.472134 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vsd56"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.016189 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-8qnjj"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.027105 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.031296 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8qnjj"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.033383 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.078415 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkn9l\" (UniqueName: \"kubernetes.io/projected/6caca38d-9421-4a65-8e5e-ddc0343460c2-kube-api-access-mkn9l\") pod \"root-account-create-update-8qnjj\" (UID: \"6caca38d-9421-4a65-8e5e-ddc0343460c2\") " pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.078808 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6caca38d-9421-4a65-8e5e-ddc0343460c2-operator-scripts\") pod \"root-account-create-update-8qnjj\" (UID: \"6caca38d-9421-4a65-8e5e-ddc0343460c2\") " pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.135530 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-70cd-account-create-update-pns45"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.141042 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.156601 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.177665 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-70cd-account-create-update-pns45"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.180533 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac650857-a714-4612-90b0-a8dada6949bb-operator-scripts\") pod \"neutron-70cd-account-create-update-pns45\" (UID: \"ac650857-a714-4612-90b0-a8dada6949bb\") " pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.180602 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkn9l\" (UniqueName: \"kubernetes.io/projected/6caca38d-9421-4a65-8e5e-ddc0343460c2-kube-api-access-mkn9l\") pod \"root-account-create-update-8qnjj\" (UID: \"6caca38d-9421-4a65-8e5e-ddc0343460c2\") " pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.180682 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6caca38d-9421-4a65-8e5e-ddc0343460c2-operator-scripts\") pod \"root-account-create-update-8qnjj\" (UID: \"6caca38d-9421-4a65-8e5e-ddc0343460c2\") " pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.180712 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bngph\" (UniqueName: \"kubernetes.io/projected/ac650857-a714-4612-90b0-a8dada6949bb-kube-api-access-bngph\") pod \"neutron-70cd-account-create-update-pns45\" (UID: \"ac650857-a714-4612-90b0-a8dada6949bb\") " pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.181773 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6caca38d-9421-4a65-8e5e-ddc0343460c2-operator-scripts\") pod \"root-account-create-update-8qnjj\" (UID: \"6caca38d-9421-4a65-8e5e-ddc0343460c2\") " pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.200514 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-9f5q7"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.215414 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkn9l\" (UniqueName: \"kubernetes.io/projected/6caca38d-9421-4a65-8e5e-ddc0343460c2-kube-api-access-mkn9l\") pod \"root-account-create-update-8qnjj\" (UID: \"6caca38d-9421-4a65-8e5e-ddc0343460c2\") " pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.246252 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-9f5q7"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.258181 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-fedf-account-create-update-5g5mh"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.259909 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.272677 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.282103 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac650857-a714-4612-90b0-a8dada6949bb-operator-scripts\") pod \"neutron-70cd-account-create-update-pns45\" (UID: \"ac650857-a714-4612-90b0-a8dada6949bb\") " pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.282150 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6256c29-7af4-4921-b494-ef3a2e5e976f-operator-scripts\") pod \"cinder-fedf-account-create-update-5g5mh\" (UID: \"e6256c29-7af4-4921-b494-ef3a2e5e976f\") " pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.282219 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4dfv\" (UniqueName: \"kubernetes.io/projected/e6256c29-7af4-4921-b494-ef3a2e5e976f-kube-api-access-w4dfv\") pod \"cinder-fedf-account-create-update-5g5mh\" (UID: \"e6256c29-7af4-4921-b494-ef3a2e5e976f\") " pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.282255 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bngph\" (UniqueName: \"kubernetes.io/projected/ac650857-a714-4612-90b0-a8dada6949bb-kube-api-access-bngph\") pod \"neutron-70cd-account-create-update-pns45\" (UID: \"ac650857-a714-4612-90b0-a8dada6949bb\") " pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.283318 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac650857-a714-4612-90b0-a8dada6949bb-operator-scripts\") pod \"neutron-70cd-account-create-update-pns45\" (UID: \"ac650857-a714-4612-90b0-a8dada6949bb\") " pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.294526 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-e093-account-create-update-dlpwc"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.295796 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.313994 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.318031 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.318224 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="7f67df75-67c0-4609-9afe-caa099a5ad1e" containerName="openstackclient" containerID="cri-o://c36704626e89b3205c40733c06c606033c54a72189b14d4d21e965adaeaac743" gracePeriod=2 Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.328354 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bngph\" (UniqueName: \"kubernetes.io/projected/ac650857-a714-4612-90b0-a8dada6949bb-kube-api-access-bngph\") pod \"neutron-70cd-account-create-update-pns45\" (UID: \"ac650857-a714-4612-90b0-a8dada6949bb\") " pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.361615 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-fedf-account-create-update-5g5mh"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.383205 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.406735 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4dfv\" (UniqueName: \"kubernetes.io/projected/e6256c29-7af4-4921-b494-ef3a2e5e976f-kube-api-access-w4dfv\") pod \"cinder-fedf-account-create-update-5g5mh\" (UID: \"e6256c29-7af4-4921-b494-ef3a2e5e976f\") " pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.406808 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shwv7\" (UniqueName: \"kubernetes.io/projected/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-kube-api-access-shwv7\") pod \"glance-e093-account-create-update-dlpwc\" (UID: \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\") " pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.406845 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-operator-scripts\") pod \"glance-e093-account-create-update-dlpwc\" (UID: \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\") " pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.406926 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6256c29-7af4-4921-b494-ef3a2e5e976f-operator-scripts\") pod \"cinder-fedf-account-create-update-5g5mh\" (UID: \"e6256c29-7af4-4921-b494-ef3a2e5e976f\") " pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.407886 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6256c29-7af4-4921-b494-ef3a2e5e976f-operator-scripts\") pod \"cinder-fedf-account-create-update-5g5mh\" (UID: 
\"e6256c29-7af4-4921-b494-ef3a2e5e976f\") " pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.428165 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.461245 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4dfv\" (UniqueName: \"kubernetes.io/projected/e6256c29-7af4-4921-b494-ef3a2e5e976f-kube-api-access-w4dfv\") pod \"cinder-fedf-account-create-update-5g5mh\" (UID: \"e6256c29-7af4-4921-b494-ef3a2e5e976f\") " pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.474681 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.505207 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vsd56" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="registry-server" containerID="cri-o://8b89dd665aed0c4309515ac3a06b12b69b96df8267df5cec76c04c0f28802c98" gracePeriod=2 Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.528504 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shwv7\" (UniqueName: \"kubernetes.io/projected/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-kube-api-access-shwv7\") pod \"glance-e093-account-create-update-dlpwc\" (UID: \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\") " pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.528837 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-operator-scripts\") pod \"glance-e093-account-create-update-dlpwc\" (UID: \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\") " pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.532633 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-operator-scripts\") pod \"glance-e093-account-create-update-dlpwc\" (UID: \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\") " pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.534583 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.570659 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-e093-account-create-update-dlpwc"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.602082 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shwv7\" (UniqueName: \"kubernetes.io/projected/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-kube-api-access-shwv7\") pod \"glance-e093-account-create-update-dlpwc\" (UID: \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\") " pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:49 crc kubenswrapper[4787]: E0129 13:40:49.610255 4787 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.203:37700->38.102.83.203:43259: write tcp 38.102.83.203:37700->38.102.83.203:43259: write: broken pipe Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.627043 
4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:49 crc kubenswrapper[4787]: E0129 13:40:49.631277 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 13:40:49 crc kubenswrapper[4787]: E0129 13:40:49.631333 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data podName:6285155e-2d1b-4c6f-be33-5f2681a7b5e0 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:50.131317665 +0000 UTC m=+1488.892577941 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data") pod "rabbitmq-cell1-server-0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0") : configmap "rabbitmq-cell1-config-data" not found Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.633708 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.638521 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-70cd-account-create-update-2xcb2"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.678554 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-70cd-account-create-update-2xcb2"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.709521 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.710143 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="openstack-network-exporter" containerID="cri-o://dc37f2e0d9cdd587ea0cfeec9b06226a2300a64f38c57be961c897d05d7498a1" gracePeriod=300 Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.744278 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-fedf-account-create-update-qqj9j"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.770139 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-e093-account-create-update-v7drb"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.809759 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-e093-account-create-update-v7drb"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.815434 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-fedf-account-create-update-qqj9j"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.838561 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.839117 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerName="openstack-network-exporter" containerID="cri-o://898e2a89c97b0d73e3b5a788305880e3b3f59cb25679762400e781c5389d9cd4" gracePeriod=300 Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.919201 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-e613-account-create-update-mrxwq"] Jan 29 13:40:49 crc kubenswrapper[4787]: E0129 13:40:49.919885 4787 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="7f67df75-67c0-4609-9afe-caa099a5ad1e" containerName="openstackclient" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.919907 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f67df75-67c0-4609-9afe-caa099a5ad1e" containerName="openstackclient" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.920162 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f67df75-67c0-4609-9afe-caa099a5ad1e" containerName="openstackclient" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.920938 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.929812 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.955465 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e613-account-create-update-mrxwq"] Jan 29 13:40:49 crc kubenswrapper[4787]: I0129 13:40:49.996841 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="ovsdbserver-sb" containerID="cri-o://76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80" gracePeriod=300 Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.054557 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trs8d\" (UniqueName: \"kubernetes.io/projected/6cba6e64-0710-4f92-aca6-f141b4ebcaea-kube-api-access-trs8d\") pod \"nova-api-e613-account-create-update-mrxwq\" (UID: \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\") " pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.054603 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cba6e64-0710-4f92-aca6-f141b4ebcaea-operator-scripts\") pod \"nova-api-e613-account-create-update-mrxwq\" (UID: \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\") " pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.066439 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65702811-f528-4d05-a240-af4d28db992b" path="/var/lib/kubelet/pods/65702811-f528-4d05-a240-af4d28db992b/volumes" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.067861 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90a095df-6c58-487f-aa30-deca9dc23d47" path="/var/lib/kubelet/pods/90a095df-6c58-487f-aa30-deca9dc23d47/volumes" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.068563 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dad383e-4d7c-485c-8a28-cf87a91f3370" path="/var/lib/kubelet/pods/9dad383e-4d7c-485c-8a28-cf87a91f3370/volumes" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.069231 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af317c31-0e4f-4ddb-8044-a9ce9965f264" path="/var/lib/kubelet/pods/af317c31-0e4f-4ddb-8044-a9ce9965f264/volumes" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.072585 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.072625 4787 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-cell1-06dc-account-create-update-xzhzd"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.075722 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-p77h8"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.075748 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-p77h8"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.075762 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-xzhzd"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.075773 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-h9g5j"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.075927 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.076105 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="ovn-northd" containerID="cri-o://f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0" gracePeriod=30 Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.076475 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="openstack-network-exporter" containerID="cri-o://453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b" gracePeriod=30 Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.077843 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.081932 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.082145 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.124019 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-h9g5j"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.156352 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trs8d\" (UniqueName: \"kubernetes.io/projected/6cba6e64-0710-4f92-aca6-f141b4ebcaea-kube-api-access-trs8d\") pod \"nova-api-e613-account-create-update-mrxwq\" (UID: \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\") " pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.156419 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cba6e64-0710-4f92-aca6-f141b4ebcaea-operator-scripts\") pod \"nova-api-e613-account-create-update-mrxwq\" (UID: \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\") " pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.157378 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cba6e64-0710-4f92-aca6-f141b4ebcaea-operator-scripts\") pod \"nova-api-e613-account-create-update-mrxwq\" (UID: \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\") " 
pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:50 crc kubenswrapper[4787]: E0129 13:40:50.157440 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 13:40:50 crc kubenswrapper[4787]: E0129 13:40:50.157493 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data podName:6285155e-2d1b-4c6f-be33-5f2681a7b5e0 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:51.15748023 +0000 UTC m=+1489.918740506 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data") pod "rabbitmq-cell1-server-0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0") : configmap "rabbitmq-cell1-config-data" not found Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.187442 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-hz6gf"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.187475 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerName="ovsdbserver-nb" containerID="cri-o://043d0ca5869b624c5c827973b5831c73dd8054e384abffecf0ed9cf48cb278f0" gracePeriod=300 Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.232558 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trs8d\" (UniqueName: \"kubernetes.io/projected/6cba6e64-0710-4f92-aca6-f141b4ebcaea-kube-api-access-trs8d\") pod \"nova-api-e613-account-create-update-mrxwq\" (UID: \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\") " pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.265516 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-e613-account-create-update-2fvc2"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.267780 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05668608-1ac5-4376-b0ef-0ec5604136a1-operator-scripts\") pod \"nova-cell0-a3c3-account-create-update-h9g5j\" (UID: \"05668608-1ac5-4376-b0ef-0ec5604136a1\") " pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.267850 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69319341-ad07-4fcc-a65a-585f389382ab-operator-scripts\") pod \"nova-cell1-06dc-account-create-update-xzhzd\" (UID: \"69319341-ad07-4fcc-a65a-585f389382ab\") " pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.270539 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.271554 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwnbq\" (UniqueName: \"kubernetes.io/projected/69319341-ad07-4fcc-a65a-585f389382ab-kube-api-access-mwnbq\") pod \"nova-cell1-06dc-account-create-update-xzhzd\" (UID: \"69319341-ad07-4fcc-a65a-585f389382ab\") " pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.271602 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfqps\" (UniqueName: \"kubernetes.io/projected/05668608-1ac5-4376-b0ef-0ec5604136a1-kube-api-access-rfqps\") pod \"nova-cell0-a3c3-account-create-update-h9g5j\" (UID: \"05668608-1ac5-4376-b0ef-0ec5604136a1\") " pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.296567 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-e613-account-create-update-2fvc2"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.304056 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-jp978"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.347318 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-jp978"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.373895 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05668608-1ac5-4376-b0ef-0ec5604136a1-operator-scripts\") pod \"nova-cell0-a3c3-account-create-update-h9g5j\" (UID: \"05668608-1ac5-4376-b0ef-0ec5604136a1\") " pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.373998 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69319341-ad07-4fcc-a65a-585f389382ab-operator-scripts\") pod \"nova-cell1-06dc-account-create-update-xzhzd\" (UID: \"69319341-ad07-4fcc-a65a-585f389382ab\") " pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.374143 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwnbq\" (UniqueName: \"kubernetes.io/projected/69319341-ad07-4fcc-a65a-585f389382ab-kube-api-access-mwnbq\") pod \"nova-cell1-06dc-account-create-update-xzhzd\" (UID: \"69319341-ad07-4fcc-a65a-585f389382ab\") " pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.374208 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfqps\" (UniqueName: \"kubernetes.io/projected/05668608-1ac5-4376-b0ef-0ec5604136a1-kube-api-access-rfqps\") pod \"nova-cell0-a3c3-account-create-update-h9g5j\" (UID: \"05668608-1ac5-4376-b0ef-0ec5604136a1\") " pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.375265 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05668608-1ac5-4376-b0ef-0ec5604136a1-operator-scripts\") pod \"nova-cell0-a3c3-account-create-update-h9g5j\" (UID: \"05668608-1ac5-4376-b0ef-0ec5604136a1\") " 
pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.381307 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69319341-ad07-4fcc-a65a-585f389382ab-operator-scripts\") pod \"nova-cell1-06dc-account-create-update-xzhzd\" (UID: \"69319341-ad07-4fcc-a65a-585f389382ab\") " pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.401087 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-2xr6j"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.436199 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwnbq\" (UniqueName: \"kubernetes.io/projected/69319341-ad07-4fcc-a65a-585f389382ab-kube-api-access-mwnbq\") pod \"nova-cell1-06dc-account-create-update-xzhzd\" (UID: \"69319341-ad07-4fcc-a65a-585f389382ab\") " pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.447805 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfqps\" (UniqueName: \"kubernetes.io/projected/05668608-1ac5-4376-b0ef-0ec5604136a1-kube-api-access-rfqps\") pod \"nova-cell0-a3c3-account-create-update-h9g5j\" (UID: \"05668608-1ac5-4376-b0ef-0ec5604136a1\") " pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.448777 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-wpb6r"] Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.448962 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-wpb6r" podUID="1a18d2e9-35be-4d8f-9d13-08296cfa2963" containerName="openstack-network-exporter" containerID="cri-o://da4713d4a7d29b71f50eb1206c38223a024fe96ef15d57aa224e3edbf3ee1b4e" gracePeriod=30 Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.487867 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.641751 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e91c97aa-9ab9-47e6-9821-22ee20dff312/ovsdbserver-sb/0.log" Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.641798 4787 generic.go:334] "Generic (PLEG): container finished" podID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerID="dc37f2e0d9cdd587ea0cfeec9b06226a2300a64f38c57be961c897d05d7498a1" exitCode=2 Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.641905 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e91c97aa-9ab9-47e6-9821-22ee20dff312","Type":"ContainerDied","Data":"dc37f2e0d9cdd587ea0cfeec9b06226a2300a64f38c57be961c897d05d7498a1"} Jan 29 13:40:50 crc kubenswrapper[4787]: I0129 13:40:50.653470 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.706647 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-bjp2c"] Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.718682 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.721379 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-bjp2c"] Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.722500 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_09f4aba5-9fa2-4e2d-ac39-e62905543d84/ovsdbserver-nb/0.log" Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.722562 4787 generic.go:334] "Generic (PLEG): container finished" podID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerID="898e2a89c97b0d73e3b5a788305880e3b3f59cb25679762400e781c5389d9cd4" exitCode=2 Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.722579 4787 generic.go:334] "Generic (PLEG): container finished" podID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerID="043d0ca5869b624c5c827973b5831c73dd8054e384abffecf0ed9cf48cb278f0" exitCode=143 Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.722678 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"09f4aba5-9fa2-4e2d-ac39-e62905543d84","Type":"ContainerDied","Data":"898e2a89c97b0d73e3b5a788305880e3b3f59cb25679762400e781c5389d9cd4"} Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.722713 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"09f4aba5-9fa2-4e2d-ac39-e62905543d84","Type":"ContainerDied","Data":"043d0ca5869b624c5c827973b5831c73dd8054e384abffecf0ed9cf48cb278f0"} Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:50.747985 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:50.748060 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data podName:a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:51.248043235 +0000 UTC m=+1490.009303511 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data") pod "rabbitmq-server-0" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5") : configmap "rabbitmq-config-data" not found
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:50.774037 4787 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.203:37722->38.102.83.203:43259: write tcp 38.102.83.203:37722->38.102.83.203:43259: write: broken pipe
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.805081 4787 generic.go:334] "Generic (PLEG): container finished" podID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerID="453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b" exitCode=2
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.805136 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cdeb3ae9-0105-40e4-889d-7d9ab0be4427","Type":"ContainerDied","Data":"453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.809264 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-rr4qj"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.845928 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-rr4qj"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.848172 4787 generic.go:334] "Generic (PLEG): container finished" podID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerID="8b89dd665aed0c4309515ac3a06b12b69b96df8267df5cec76c04c0f28802c98" exitCode=0
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.848198 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vsd56" event={"ID":"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e","Type":"ContainerDied","Data":"8b89dd665aed0c4309515ac3a06b12b69b96df8267df5cec76c04c0f28802c98"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.855788 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-z99d7"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.883394 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-z99d7"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.913186 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-j2hxl"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.925374 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-j2hxl"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.935603 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-9zlwc"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:50.960303 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-9zlwc"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.004695 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-l5s5r"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.022060 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-l5s5r"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.030173 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-4q8zf"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.030490 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" podUID="aa154084-240a-486e-9eb1-21620d97ec8d" containerName="dnsmasq-dns" containerID="cri-o://740be74bb4087b9bcad6b9181de441c8a1b42db5c5831a84ddf16b1021efb323" gracePeriod=10
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.039291 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-6fbps"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.062626 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-6fbps"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.063501 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vsd56"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.085117 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-69d8bc6c98-vmd8w"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.085381 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-69d8bc6c98-vmd8w" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" containerName="placement-log" containerID="cri-o://7fa1b78bdd06010ec0e648c7dc942c45ba46fb8d183d57540ee5aece3a17a14d" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.085533 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-69d8bc6c98-vmd8w" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" containerName="placement-api" containerID="cri-o://1f0d6877829ebcf7be918239787102e0a2f16c103fadf03c565be16af5f1f03a" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.098221 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.098753 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-server" containerID="cri-o://600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099136 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="swift-recon-cron" containerID="cri-o://187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099184 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="rsync" containerID="cri-o://82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099217 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-expirer" containerID="cri-o://321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099267 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-updater" containerID="cri-o://7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099306 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-auditor" containerID="cri-o://d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099336 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-replicator" containerID="cri-o://21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099367 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-server" containerID="cri-o://06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099392 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-updater" containerID="cri-o://cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099418 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-auditor" containerID="cri-o://db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099444 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-replicator" containerID="cri-o://3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099489 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-server" containerID="cri-o://44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099515 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-reaper" containerID="cri-o://edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099543 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-auditor" containerID="cri-o://939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.099568 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-replicator" containerID="cri-o://db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4" gracePeriod=30
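The burst of "Killing container with a grace period" entries above is the kubelet draining every container of a deleted pod: the runtime signals the container to terminate, waits up to the per-container gracePeriod, and only then kills it outright. A minimal Go sketch of that TERM-then-KILL pattern, using a local process as a hypothetical stand-in for a CRI container:

// TERM-then-KILL sketch: SIGTERM first, SIGKILL once the grace period runs out.
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func killWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period (143 if it died on TERM)
	case <-time.After(grace):
		_ = cmd.Process.Kill() // SIGKILL -> exit code 137
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "300") // stand-in for the container process
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println(killWithGrace(cmd, 30*time.Second))
}

The gracePeriod values in the entries (10 for dnsmasq-dns, 30 for most of the OpenStack containers) are the per-pod terminationGracePeriodSeconds carried into each kill.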
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.158552 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.158611 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data podName:6285155e-2d1b-4c6f-be33-5f2681a7b5e0 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:53.158595022 +0000 UTC m=+1491.919855298 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data") pod "rabbitmq-cell1-server-0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0") : configmap "rabbitmq-cell1-config-data" not found
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.177579 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-04f3-account-create-update-6ckbf"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.190075 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-04f3-account-create-update-6ckbf"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.202779 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-nvhcw"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.225328 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-nvhcw"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.242311 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-64p9t"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.259741 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-64p9t"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.260238 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krdfs\" (UniqueName: \"kubernetes.io/projected/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-kube-api-access-krdfs\") pod \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") "
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.260423 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-catalog-content\") pod \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") "
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.260513 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-utilities\") pod \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\" (UID: \"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e\") "
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.260988 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.261025 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data podName:a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:52.261013863 +0000 UTC m=+1491.022274139 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data") pod "rabbitmq-server-0" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5") : configmap "rabbitmq-config-data" not found
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.264928 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-utilities" (OuterVolumeSpecName: "utilities") pod "a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" (UID: "a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.280490 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85444c4b89-hx4zl"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.280795 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-85444c4b89-hx4zl" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerName="neutron-api" containerID="cri-o://4505f4e6e319771bc3829367ee02ff72fbb90f8adde43914841d18e31337e0bb" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.288098 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-85444c4b89-hx4zl" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerName="neutron-httpd" containerID="cri-o://e1e39ec84e4c856895c68934ffe37b579202f55f241d2985ef82c3428f5a54da" gracePeriod=30
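The nestedpendingoperations entries show how the kubelet backs off a failing volume mount: each failed MountVolume.SetUp schedules the next retry after a doubling durationBeforeRetry (1s and 2s here for the two rabbitmq configmaps; 500ms for another volume further down). A sketch of that doubling backoff under that assumption; the function names are illustrative, not the kubelet's nestedpendingoperations API:

// Doubling retry backoff around a failing mount operation.
package main

import (
	"errors"
	"fmt"
	"time"
)

func mountWithBackoff(mount func() error, initial, max time.Duration, attempts int) error {
	delay := initial
	for i := 0; i < attempts; i++ {
		if err := mount(); err == nil {
			return nil
		} else {
			fmt.Printf("mount failed: %v; no retries permitted for %s\n", err, delay)
		}
		time.Sleep(delay)
		if delay *= 2; delay > max {
			delay = max // cap the backoff
		}
	}
	return errors.New("gave up waiting for volume source")
}

func main() {
	calls := 0
	err := mountWithBackoff(func() error {
		calls++
		if calls < 4 { // succeeds once the configmap is recreated
			return errors.New(`configmap "rabbitmq-config-data" not found`)
		}
		return nil
	}, 500*time.Millisecond, 2*time.Minute, 10)
	fmt.Println(err)
}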
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.290196 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-kube-api-access-krdfs" (OuterVolumeSpecName: "kube-api-access-krdfs") pod "a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" (UID: "a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e"). InnerVolumeSpecName "kube-api-access-krdfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.342615 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.342891 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerName="cinder-scheduler" containerID="cri-o://70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.343295 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerName="probe" containerID="cri-o://7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.352838 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" podUID="aa154084-240a-486e-9eb1-21620d97ec8d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.198:5353: connect: connection refused"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.366810 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.366837 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krdfs\" (UniqueName: \"kubernetes.io/projected/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-kube-api-access-krdfs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.372946 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.373630 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerName="cinder-api-log" containerID="cri-o://97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.374082 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerName="cinder-api" containerID="cri-o://ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.384571 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-7xwdd"]
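The "Probe failed" entry above is a readiness check racing the shutdown: dnsmasq-dns is already being killed, so dialing its port gets connection refused. A TCP-style check of the kind that produces that output, as a small sketch (address and timeout are illustrative):

// TCP readiness probe: connect succeeds -> ready; refused -> failure.
package main

import (
	"fmt"
	"net"
	"time"
)

func tcpProbe(addr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return err // surfaces as probeResult="failure" with the dial error as output
	}
	return conn.Close()
}

func main() {
	if err := tcpProbe("10.217.0.198:5353", time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}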
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.396825 4787 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Jan 29 13:40:51 crc kubenswrapper[4787]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 29 13:40:51 crc kubenswrapper[4787]: + source /usr/local/bin/container-scripts/functions
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNBridge=br-int
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNRemote=tcp:localhost:6642
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNEncapType=geneve
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNAvailabilityZones=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ EnableChassisAsGateway=true
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ PhysicalNetworks=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNHostName=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ ovs_dir=/var/lib/openvswitch
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 13:40:51 crc kubenswrapper[4787]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 13:40:51 crc kubenswrapper[4787]: + sleep 0.5
Jan 29 13:40:51 crc kubenswrapper[4787]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 13:40:51 crc kubenswrapper[4787]: + cleanup_ovsdb_server_semaphore
Jan 29 13:40:51 crc kubenswrapper[4787]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 13:40:51 crc kubenswrapper[4787]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 29 13:40:51 crc kubenswrapper[4787]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-2xr6j" message=<
Jan 29 13:40:51 crc kubenswrapper[4787]: Exiting ovsdb-server (5) [ OK ]
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 29 13:40:51 crc kubenswrapper[4787]: + source /usr/local/bin/container-scripts/functions
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNBridge=br-int
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNRemote=tcp:localhost:6642
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNEncapType=geneve
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNAvailabilityZones=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ EnableChassisAsGateway=true
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ PhysicalNetworks=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNHostName=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ ovs_dir=/var/lib/openvswitch
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 13:40:51 crc kubenswrapper[4787]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 13:40:51 crc kubenswrapper[4787]: + sleep 0.5
Jan 29 13:40:51 crc kubenswrapper[4787]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 13:40:51 crc kubenswrapper[4787]: + cleanup_ovsdb_server_semaphore
Jan 29 13:40:51 crc kubenswrapper[4787]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 13:40:51 crc kubenswrapper[4787]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 29 13:40:51 crc kubenswrapper[4787]: >
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.396858 4787 kuberuntime_container.go:691] "PreStop hook failed" err=<
Jan 29 13:40:51 crc kubenswrapper[4787]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Jan 29 13:40:51 crc kubenswrapper[4787]: + source /usr/local/bin/container-scripts/functions
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNBridge=br-int
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNRemote=tcp:localhost:6642
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNEncapType=geneve
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNAvailabilityZones=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ EnableChassisAsGateway=true
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ PhysicalNetworks=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ OVNHostName=
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ DB_FILE=/etc/openvswitch/conf.db
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ ovs_dir=/var/lib/openvswitch
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Jan 29 13:40:51 crc kubenswrapper[4787]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 13:40:51 crc kubenswrapper[4787]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 13:40:51 crc kubenswrapper[4787]: + sleep 0.5
Jan 29 13:40:51 crc kubenswrapper[4787]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Jan 29 13:40:51 crc kubenswrapper[4787]: + cleanup_ovsdb_server_semaphore
Jan 29 13:40:51 crc kubenswrapper[4787]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Jan 29 13:40:51 crc kubenswrapper[4787]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Jan 29 13:40:51 crc kubenswrapper[4787]: > pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" containerID="cri-o://eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.397101 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" containerID="cri-o://eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.409542 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-fedf-account-create-update-5g5mh"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.439254 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-7xwdd"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.443565 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.444437 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerName="glance-log" containerID="cri-o://431d19c70bb7fc09c0cea13cec37421e6b58778d78ec5b6bf958f731b6ca0476" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.444574 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerName="glance-httpd" containerID="cri-o://a8b8d261a49e47ed22a0cd5c563cf4143e2a75230a47767988c41d719d54d742" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.500205 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-e0f8-account-create-update-nqqt5"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.509408 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-e0f8-account-create-update-nqqt5"]
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.514299 4787 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.203:37742->38.102.83.203:43259: write tcp 38.102.83.203:37742->38.102.83.203:43259: write: broken pipe
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.548952 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-64fc7f548f-h8fjw"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.549226 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-64fc7f548f-h8fjw" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerName="barbican-worker-log" containerID="cri-o://0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f" gracePeriod=30
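The PreStop hook trace above is a semaphore-file handshake: stop-ovsdb-server.sh polls for a "safe to stop" marker before running ovs-ctl, and "exited with 137" (128+9) means the hook was SIGKILLed because it outlived the time the runtime allowed it. A Go sketch of the same wait-then-stop pattern; the paths mirror the script, the deadline is illustrative:

// Poll for the semaphore file, then remove it and stop ovsdb-server.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

const semaphore = "/var/lib/openvswitch/is_safe_to_stop_ovsdb_server"

func preStop(deadline time.Duration) error {
	expire := time.Now().Add(deadline)
	for {
		if _, err := os.Stat(semaphore); err == nil {
			break // marker present: safe to stop
		}
		if time.Now().After(expire) {
			return fmt.Errorf("gave up waiting for %s", semaphore)
		}
		time.Sleep(500 * time.Millisecond) // the script's `sleep 0.5`
	}
	_ = os.Remove(semaphore) // cleanup_ovsdb_server_semaphore
	return exec.Command("/usr/share/openvswitch/scripts/ovs-ctl",
		"stop", "--no-ovs-vswitchd").Run()
}

func main() { fmt.Println(preStop(25 * time.Second)) }

Note that the grace period keeps counting while the hook runs, which is why ovs-vswitchd in the same pod is later killed with gracePeriod=29 rather than 30.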
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.549481 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-64fc7f548f-h8fjw" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerName="barbican-worker" containerID="cri-o://01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.560132 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-pvfrj"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.578267 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" (UID: "a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.583240 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.593668 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-pvfrj"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.663032 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-vtzh4"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.663672 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd" containerID="cri-o://58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" gracePeriod=29
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.672443 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.673053 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerName="glance-log" containerID="cri-o://2f63dfa9d7bf21cc31d7b9d8c380ef9fbae854b8b7d202f4e7c02c9ab75414bb" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.673400 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerName="glance-httpd" containerID="cri-o://e1a61b54bf10478ca80351b015706e154501c6dbfad962662f03a6e51dfe02bb" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.682771 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-vtzh4"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.689786 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-6f66c4d958-z5ntb"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.689997 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerName="barbican-keystone-listener-log" containerID="cri-o://b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.690357 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerName="barbican-keystone-listener" containerID="cri-o://03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.699340 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-70cd-account-create-update-pns45"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.705755 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-gcwmx"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.712073 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-gcwmx"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.719439 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-e093-account-create-update-dlpwc"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.725452 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5687c787c6-cdl5t"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.725740 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5687c787c6-cdl5t" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api-log" containerID="cri-o://1409e2db8a0ffd4d94c91a11022596ab339a612fb26e6871a45ae7940cf15a10" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.726238 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5687c787c6-cdl5t" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api" containerID="cri-o://483fbd8d88259f502b0982cdbf412937c30e55091fb48778417b91b9a155bfca" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.734401 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.747214 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.747662 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-log" containerID="cri-o://8c54aa6658f396298b1bba542f62e319b2dd09d1a2963008ba09c2366a51988b" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.748035 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-api" containerID="cri-o://083f46373caf16e71650bd1e4ebee2fe1d02f7cb3f599bbb6f51f4683a6a4fa6" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.759618 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-65b45"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.770644 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-65b45"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.787498 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-e613-account-create-update-mrxwq"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.802485 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-xzhzd"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.807403 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-24xfm"]
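The DELETE/REMOVE pairs running through this section are two distinct SyncLoop updates from the api-server watch: DELETE means the pod object gained a deletionTimestamp (start graceful shutdown), REMOVE means the object is actually gone (forget the pod). A simplified dispatch sketch; the types are illustrative stand-ins for the kubelet's internals:

// SyncLoop dispatch for pod DELETE vs REMOVE updates.
package main

import "fmt"

type op int

const (
	DELETE op = iota // deletionTimestamp set: begin graceful kill
	REMOVE           // object gone from the API: drop the pod worker
)

type update struct {
	op   op
	pods []string
}

func syncLoopIteration(u update, workers map[string]bool) {
	switch u.op {
	case DELETE:
		fmt.Printf("SyncLoop DELETE source=%q pods=%q\n", "api", u.pods)
		// each container would be killed with its grace period here
	case REMOVE:
		fmt.Printf("SyncLoop REMOVE source=%q pods=%q\n", "api", u.pods)
		for _, p := range u.pods {
			delete(workers, p)
		}
	}
}

func main() {
	workers := map[string]bool{"openstack/glance-db-create-gcwmx": true}
	syncLoopIteration(update{DELETE, []string{"openstack/glance-db-create-gcwmx"}}, workers)
	syncLoopIteration(update{REMOVE, []string{"openstack/glance-db-create-gcwmx"}}, workers)
}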
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.815538 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-24xfm"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.821376 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.837059 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-dc6vm"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.846698 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" containerName="rabbitmq" containerID="cri-o://19173a801f2b7b195813a7a47563e2f75b1704c2dc99324f6bc865c02b1775b4" gracePeriod=604800
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.866229 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-dc6vm"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.877690 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.877912 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-log" containerID="cri-o://221715efc1d70075a201ff1b336d7ca967d74dcf9c3ba5c93e0689478777ecd0" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.878320 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-metadata" containerID="cri-o://25425c352e980ca78c9d13c8057eecdcf5a099ad60b14349a5a4e3ffcfaaeba4" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.886418 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e91c97aa-9ab9-47e6-9821-22ee20dff312/ovsdbserver-sb/0.log"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.886471 4787 generic.go:334] "Generic (PLEG): container finished" podID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerID="76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80" exitCode=143
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.886517 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e91c97aa-9ab9-47e6-9821-22ee20dff312","Type":"ContainerDied","Data":"76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.888445 4787 generic.go:334] "Generic (PLEG): container finished" podID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerID="1409e2db8a0ffd4d94c91a11022596ab339a612fb26e6871a45ae7940cf15a10" exitCode=143
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.888510 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5687c787c6-cdl5t" event={"ID":"3f204ba0-4972-4e50-9c21-e9639ef73ff3","Type":"ContainerDied","Data":"1409e2db8a0ffd4d94c91a11022596ab339a612fb26e6871a45ae7940cf15a10"}
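The exit codes in the ContainerDied events follow the shell convention 128+signal: 143 = SIGTERM (a clean reaction to the graceful kill), 137 = SIGKILL (grace exhausted), while 0 and 2 are ordinary process exits. A small decoder for that convention:

// Map container exit codes back to the signal that produced them.
package main

import (
	"fmt"
	"syscall"
)

func describeExit(code int) string {
	if code > 128 && code < 128+64 {
		return fmt.Sprintf("killed by signal %d (%v)", code-128, syscall.Signal(code-128))
	}
	if code == 0 {
		return "clean exit"
	}
	return fmt.Sprintf("error exit %d", code)
}

func main() {
	for _, c := range []int{0, 2, 137, 143} { // the codes observed in this log
		fmt.Printf("exitCode=%d: %s\n", c, describeExit(c))
	}
}

The rabbitmq kill above with gracePeriod=604800 (seven days) also shows that the grace period is per-pod configuration rather than a fixed 30s default.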
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.895997 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80 is running failed: container process not found" containerID="76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.896728 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80 is running failed: container process not found" containerID="76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.897084 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80 is running failed: container process not found" containerID="76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 29 13:40:51 crc kubenswrapper[4787]: E0129 13:40:51.897106 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80 is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="ovsdbserver-sb"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.897767 4787 generic.go:334] "Generic (PLEG): container finished" podID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerID="2f63dfa9d7bf21cc31d7b9d8c380ef9fbae854b8b7d202f4e7c02c9ab75414bb" exitCode=143
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.897817 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67675e2f-3a2b-4552-bbd5-c12b3ba3a505","Type":"ContainerDied","Data":"2f63dfa9d7bf21cc31d7b9d8c380ef9fbae854b8b7d202f4e7c02c9ab75414bb"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.899880 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vsd56" event={"ID":"a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e","Type":"ContainerDied","Data":"44f0baaef01c2f0c5a8fbc73869c04c7d525ebbbed92712b77c4d34a4231a22e"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.899908 4787 scope.go:117] "RemoveContainer" containerID="8b89dd665aed0c4309515ac3a06b12b69b96df8267df5cec76c04c0f28802c98"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.900023 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vsd56"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.910381 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-h9g5j"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.916071 4787 generic.go:334] "Generic (PLEG): container finished" podID="7f67df75-67c0-4609-9afe-caa099a5ad1e" containerID="c36704626e89b3205c40733c06c606033c54a72189b14d4d21e965adaeaac743" exitCode=137
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.932957 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.933200 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="ab278964-ff72-4353-b454-9587f235c492" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9" gracePeriod=30
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.939138 4787 generic.go:334] "Generic (PLEG): container finished" podID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerID="0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f" exitCode=143
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.939230 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-64fc7f548f-h8fjw" event={"ID":"0e48c5bf-c285-446e-a91e-fe216f819f05","Type":"ContainerDied","Data":"0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.944585 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-8qnjj"]
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.946327 4787 generic.go:334] "Generic (PLEG): container finished" podID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerID="431d19c70bb7fc09c0cea13cec37421e6b58778d78ec5b6bf958f731b6ca0476" exitCode=143
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.946374 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a","Type":"ContainerDied","Data":"431d19c70bb7fc09c0cea13cec37421e6b58778d78ec5b6bf958f731b6ca0476"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.948106 4787 generic.go:334] "Generic (PLEG): container finished" podID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerID="e1e39ec84e4c856895c68934ffe37b579202f55f241d2985ef82c3428f5a54da" exitCode=0
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.948142 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85444c4b89-hx4zl" event={"ID":"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced","Type":"ContainerDied","Data":"e1e39ec84e4c856895c68934ffe37b579202f55f241d2985ef82c3428f5a54da"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.951265 4787 scope.go:117] "RemoveContainer" containerID="95b37a3998bf6f4c7bce4baa18b023a064fe28aa40f952a1e202e9e856e7dcd3"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.952260 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wpb6r_1a18d2e9-35be-4d8f-9d13-08296cfa2963/openstack-network-exporter/0.log"
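The three ExecSync failures above are an exec readiness probe (`pidof ovsdb-server`) racing the container's teardown: the runtime answers NotFound because the process is already gone, and the prober records "Probe errored" (an infrastructure error) rather than a normal failure result. A sketch of that distinction; the runtime call is a fake stand-in, with the error text mirroring the log:

// Exec probe against a container that has already exited.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("rpc error: code = NotFound desc = container is not created or running")

// execSync stands in for the CRI ExecSync call.
func execSync(containerID string, cmd []string) (exitCode int, err error) {
	return 0, errNotFound // container process already gone
}

func probe(containerID string) string {
	code, err := execSync(containerID, []string{"/usr/bin/pidof", "ovsdb-server"})
	switch {
	case err != nil:
		return "errored: " + err.Error() // runtime error, not a probe verdict
	case code != 0:
		return "failure"
	default:
		return "success"
	}
}

func main() {
	fmt.Println(probe("76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80"))
}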
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.952280 4787 generic.go:334] "Generic (PLEG): container finished" podID="1a18d2e9-35be-4d8f-9d13-08296cfa2963" containerID="da4713d4a7d29b71f50eb1206c38223a024fe96ef15d57aa224e3edbf3ee1b4e" exitCode=2
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.952311 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wpb6r" event={"ID":"1a18d2e9-35be-4d8f-9d13-08296cfa2963","Type":"ContainerDied","Data":"da4713d4a7d29b71f50eb1206c38223a024fe96ef15d57aa224e3edbf3ee1b4e"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.973940 4787 generic.go:334] "Generic (PLEG): container finished" podID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerID="97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a" exitCode=143
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.974004 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1287d5ec-d072-43ba-b553-6d2d229b7c6c","Type":"ContainerDied","Data":"97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a"}
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.981414 4787 scope.go:117] "RemoveContainer" containerID="3f33d418440342619e6bc5d94a2117adf6e03acd2493d21c569f1dcbd130ec68"
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.985984 4787 generic.go:334] "Generic (PLEG): container finished" podID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerID="b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c" exitCode=143
Jan 29 13:40:51 crc kubenswrapper[4787]: I0129 13:40:51.995016 4787 generic.go:334] "Generic (PLEG): container finished" podID="87eff82d-823f-44a9-b96b-fed35701c54b" containerID="7fa1b78bdd06010ec0e648c7dc942c45ba46fb8d183d57540ee5aece3a17a14d" exitCode=143
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.009351 4787 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/nova-cell1-conductor-0" secret="" err="secret \"nova-nova-dockercfg-wbn2g\" not found"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.027933 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="012c4748-24f7-48bd-983a-481f3f544724" path="/var/lib/kubelet/pods/012c4748-24f7-48bd-983a-481f3f544724/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.028521 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05d80766-0024-4274-934c-0c6e206e5de0" path="/var/lib/kubelet/pods/05d80766-0024-4274-934c-0c6e206e5de0/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.029145 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a5645bd-0519-4e87-850a-7cd4c72bd0c1" path="/var/lib/kubelet/pods/0a5645bd-0519-4e87-850a-7cd4c72bd0c1/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054525 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054556 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054563 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054572 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054580 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054589 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054595 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054601 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054607 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054614 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad" exitCode=0
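The "Cleaned up orphaned pod volumes dir" entries are the kubelet's periodic sweep: for pod UIDs no longer known to it, the now-empty volumes directory under /var/lib/kubelet/pods/<uid> is removed. A sketch of that sweep; the "active" set is a stand-in for the kubelet's pod manager:

// Remove the volumes dir of pods that no longer exist.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cleanupOrphans(podsRoot string, active map[string]bool) error {
	entries, err := os.ReadDir(podsRoot)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if !e.IsDir() || active[e.Name()] {
			continue // still a live pod; leave it alone
		}
		volumes := filepath.Join(podsRoot, e.Name(), "volumes")
		if err := os.Remove(volumes); err == nil { // only succeeds once emptied
			fmt.Printf("Cleaned up orphaned pod volumes dir podUID=%q path=%q\n",
				e.Name(), volumes)
		}
	}
	return nil
}

func main() {
	_ = cleanupOrphans("/var/lib/kubelet/pods", map[string]bool{})
}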
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054621 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054627 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054632 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.054639 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.055911 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="120db9b0-7739-4e28-ba21-4f8bedf3a8d8" path="/var/lib/kubelet/pods/120db9b0-7739-4e28-ba21-4f8bedf3a8d8/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.056751 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c0e8878-777a-4637-906a-c23cd622a9ee" path="/var/lib/kubelet/pods/3c0e8878-777a-4637-906a-c23cd622a9ee/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.057273 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="432d7a0e-772b-4a31-907f-d3c5b9bfe12a" path="/var/lib/kubelet/pods/432d7a0e-772b-4a31-907f-d3c5b9bfe12a/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.057827 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="558f12be-14a0-43bb-9f88-98f6ddafa81f" path="/var/lib/kubelet/pods/558f12be-14a0-43bb-9f88-98f6ddafa81f/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.057958 4787 generic.go:334] "Generic (PLEG): container finished" podID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.059141 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59009fab-52fa-4e28-a87e-2fa7a49d9f7d" path="/var/lib/kubelet/pods/59009fab-52fa-4e28-a87e-2fa7a49d9f7d/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.059737 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5955ac52-7d5b-4d18-95c2-c733b868af76" path="/var/lib/kubelet/pods/5955ac52-7d5b-4d18-95c2-c733b868af76/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.086393 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62924dc0-4190-4229-a277-6a3a1f775498" path="/var/lib/kubelet/pods/62924dc0-4190-4229-a277-6a3a1f775498/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.087223 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63ca8415-8ac5-4c3d-9fca-98a46e8a6da7" path="/var/lib/kubelet/pods/63ca8415-8ac5-4c3d-9fca-98a46e8a6da7/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.087912 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91b2f9a1-513a-4fe0-8319-daf196d2afd8" path="/var/lib/kubelet/pods/91b2f9a1-513a-4fe0-8319-daf196d2afd8/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.089201 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7a32d77-75e8-4777-9ace-f29730eb8e4d" path="/var/lib/kubelet/pods/a7a32d77-75e8-4777-9ace-f29730eb8e4d/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.092712 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7df2fd4-19d7-4610-ad5a-9738f142c562" path="/var/lib/kubelet/pods/a7df2fd4-19d7-4610-ad5a-9738f142c562/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.094781 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af36f69e-989c-40df-b146-df2168789b88" path="/var/lib/kubelet/pods/af36f69e-989c-40df-b146-df2168789b88/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.097351 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2cd10ec-df06-4985-a309-d61bd27c0cb7" path="/var/lib/kubelet/pods/b2cd10ec-df06-4985-a309-d61bd27c0cb7/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.098393 4787 secret.go:188] Couldn't get secret openstack/nova-cell1-conductor-config-data: secret "nova-cell1-conductor-config-data" not found
Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.098444 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data podName:f73803d0-ec9b-4483-a509-7bff9afb1d85 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:52.598428102 +0000 UTC m=+1491.359688378 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data") pod "nova-cell1-conductor-0" (UID: "f73803d0-ec9b-4483-a509-7bff9afb1d85") : secret "nova-cell1-conductor-config-data" not found
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.103850 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbc05e24-bbf4-44e2-9cd3-40c095f56aea" path="/var/lib/kubelet/pods/cbc05e24-bbf4-44e2-9cd3-40c095f56aea/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.105686 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2d6dbc2-04fe-4797-818d-fb90c0ab7287" path="/var/lib/kubelet/pods/e2d6dbc2-04fe-4797-818d-fb90c0ab7287/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.106485 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f021b258-9578-4a25-af1f-2456434d0cda" path="/var/lib/kubelet/pods/f021b258-9578-4a25-af1f-2456434d0cda/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.107698 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f93a192d-1d61-41b8-aef2-d8badc0cb9df" path="/var/lib/kubelet/pods/f93a192d-1d61-41b8-aef2-d8badc0cb9df/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.108651 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdeb5cf3-4f9a-4f9d-8559-648f5079397a" path="/var/lib/kubelet/pods/fdeb5cf3-4f9a-4f9d-8559-648f5079397a/volumes"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109289 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vnpkm"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109404 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" event={"ID":"00f5493b-e570-4684-b7ae-9af7154b3e51","Type":"ContainerDied","Data":"b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109553 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-69d8bc6c98-vmd8w" event={"ID":"87eff82d-823f-44a9-b96b-fed35701c54b","Type":"ContainerDied","Data":"7fa1b78bdd06010ec0e648c7dc942c45ba46fb8d183d57540ee5aece3a17a14d"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109612 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109685 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vnpkm"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109773 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109842 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-b47q7"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109892 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109942 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.109995 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-b47q7"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.110047 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.110112 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.110171 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.110222 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.110272 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.110321 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125608 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125657 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125671 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125683 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vsd56"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125702 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125716 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125726 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125737 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125749 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2xr6j" event={"ID":"213bfa86-f7a6-48b4-94a0-328352f00e75","Type":"ContainerDied","Data":"eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125764 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" event={"ID":"aa154084-240a-486e-9eb1-21620d97ec8d","Type":"ContainerDied","Data":"740be74bb4087b9bcad6b9181de441c8a1b42db5c5831a84ddf16b1021efb323"}
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.125778 4787 generic.go:334] "Generic (PLEG): container finished" podID="aa154084-240a-486e-9eb1-21620d97ec8d" containerID="740be74bb4087b9bcad6b9181de441c8a1b42db5c5831a84ddf16b1021efb323" exitCode=0
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.132512 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="56183615-9f6d-4fc8-8ff9-4856929e5d28" containerName="nova-scheduler-scheduler" containerID="cri-o://29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518" gracePeriod=30
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.116163 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="52053f33-608f-4f1e-9432-baece90d08fb" containerName="nova-cell0-conductor-conductor" containerID="cri-o://2b304ab8c2c786d238ef41d0439f6a9dcc42e20c02b9de41cde375bfb43bf8a1" gracePeriod=30
Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.136115 4787 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 29 13:40:52 crc kubenswrapper[4787]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: if [ -n "neutron" ]; then
Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="neutron"
Jan 29 13:40:52 crc kubenswrapper[4787]: else
Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="*"
Jan 29 13:40:52 crc kubenswrapper[4787]: fi
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: # going for maximum compatibility here:
Jan 29 13:40:52 crc kubenswrapper[4787]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 29 13:40:52 crc kubenswrapper[4787]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 29 13:40:52 crc kubenswrapper[4787]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 29 13:40:52 crc kubenswrapper[4787]: # support updates
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: $MYSQL_CMD < logger="UnhandledError"
Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.138186 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"neutron-db-secret\\\" not found\"" pod="openstack/neutron-70cd-account-create-update-pns45" podUID="ac650857-a714-4612-90b0-a8dada6949bb"
Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.140583 4787 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 29 13:40:52 crc kubenswrapper[4787]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: if [ -n "" ]; then
Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE=""
Jan 29 13:40:52 crc kubenswrapper[4787]: else
Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="*"
Jan 29 13:40:52 crc kubenswrapper[4787]: fi
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: # going for maximum compatibility here:
Jan 29 13:40:52 crc kubenswrapper[4787]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 29 13:40:52 crc kubenswrapper[4787]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 29 13:40:52 crc kubenswrapper[4787]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 29 13:40:52 crc kubenswrapper[4787]: # support updates
Jan 29 13:40:52 crc kubenswrapper[4787]:
Jan 29 13:40:52 crc kubenswrapper[4787]: $MYSQL_CMD < logger="UnhandledError"
Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.142663 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-8qnjj" podUID="6caca38d-9421-4a65-8e5e-ddc0343460c2"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.157628 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vsd56"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.165362 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-8qnjj"]
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.166149 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e91c97aa-9ab9-47e6-9821-22ee20dff312/ovsdbserver-sb/0.log"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.166216 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.182647 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.193281 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_09f4aba5-9fa2-4e2d-ac39-e62905543d84/ovsdbserver-nb/0.log"
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.229708 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-70cd-account-create-update-pns45"] Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.304973 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-svc\") pod \"aa154084-240a-486e-9eb1-21620d97ec8d\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305021 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8z9m\" (UniqueName: \"kubernetes.io/projected/09f4aba5-9fa2-4e2d-ac39-e62905543d84-kube-api-access-j8z9m\") pod \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305100 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-config\") pod \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305142 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdbserver-sb-tls-certs\") pod \"e91c97aa-9ab9-47e6-9821-22ee20dff312\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305184 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-config\") pod \"aa154084-240a-486e-9eb1-21620d97ec8d\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305218 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-combined-ca-bundle\") pod \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305243 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-nb\") pod \"aa154084-240a-486e-9eb1-21620d97ec8d\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305278 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glbbf\" (UniqueName: \"kubernetes.io/projected/e91c97aa-9ab9-47e6-9821-22ee20dff312-kube-api-access-glbbf\") pod \"e91c97aa-9ab9-47e6-9821-22ee20dff312\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305298 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-metrics-certs-tls-certs\") pod \"e91c97aa-9ab9-47e6-9821-22ee20dff312\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305338 4787 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"e91c97aa-9ab9-47e6-9821-22ee20dff312\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305357 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305380 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-config\") pod \"e91c97aa-9ab9-47e6-9821-22ee20dff312\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305416 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-scripts\") pod \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305446 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-scripts\") pod \"e91c97aa-9ab9-47e6-9821-22ee20dff312\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305495 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdb-rundir\") pod \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305514 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qptsp\" (UniqueName: \"kubernetes.io/projected/aa154084-240a-486e-9eb1-21620d97ec8d-kube-api-access-qptsp\") pod \"aa154084-240a-486e-9eb1-21620d97ec8d\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305542 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-metrics-certs-tls-certs\") pod \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305580 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-swift-storage-0\") pod \"aa154084-240a-486e-9eb1-21620d97ec8d\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305623 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-combined-ca-bundle\") pod \"e91c97aa-9ab9-47e6-9821-22ee20dff312\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305663 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdb-rundir\") pod \"e91c97aa-9ab9-47e6-9821-22ee20dff312\" (UID: \"e91c97aa-9ab9-47e6-9821-22ee20dff312\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305678 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdbserver-nb-tls-certs\") pod \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\" (UID: \"09f4aba5-9fa2-4e2d-ac39-e62905543d84\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.305699 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-sb\") pod \"aa154084-240a-486e-9eb1-21620d97ec8d\" (UID: \"aa154084-240a-486e-9eb1-21620d97ec8d\") " Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.306179 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.306231 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data podName:a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:54.306217506 +0000 UTC m=+1493.067477782 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data") pod "rabbitmq-server-0" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5") : configmap "rabbitmq-config-data" not found Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.306993 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-config" (OuterVolumeSpecName: "config") pod "e91c97aa-9ab9-47e6-9821-22ee20dff312" (UID: "e91c97aa-9ab9-47e6-9821-22ee20dff312"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.307442 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-scripts" (OuterVolumeSpecName: "scripts") pod "09f4aba5-9fa2-4e2d-ac39-e62905543d84" (UID: "09f4aba5-9fa2-4e2d-ac39-e62905543d84"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.307964 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-scripts" (OuterVolumeSpecName: "scripts") pod "e91c97aa-9ab9-47e6-9821-22ee20dff312" (UID: "e91c97aa-9ab9-47e6-9821-22ee20dff312"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.311347 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "09f4aba5-9fa2-4e2d-ac39-e62905543d84" (UID: "09f4aba5-9fa2-4e2d-ac39-e62905543d84"). InnerVolumeSpecName "ovsdb-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.311601 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "e91c97aa-9ab9-47e6-9821-22ee20dff312" (UID: "e91c97aa-9ab9-47e6-9821-22ee20dff312"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.312588 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-config" (OuterVolumeSpecName: "config") pod "09f4aba5-9fa2-4e2d-ac39-e62905543d84" (UID: "09f4aba5-9fa2-4e2d-ac39-e62905543d84"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.320306 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "09f4aba5-9fa2-4e2d-ac39-e62905543d84" (UID: "09f4aba5-9fa2-4e2d-ac39-e62905543d84"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.326157 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa154084-240a-486e-9eb1-21620d97ec8d-kube-api-access-qptsp" (OuterVolumeSpecName: "kube-api-access-qptsp") pod "aa154084-240a-486e-9eb1-21620d97ec8d" (UID: "aa154084-240a-486e-9eb1-21620d97ec8d"). InnerVolumeSpecName "kube-api-access-qptsp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.326207 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09f4aba5-9fa2-4e2d-ac39-e62905543d84-kube-api-access-j8z9m" (OuterVolumeSpecName: "kube-api-access-j8z9m") pod "09f4aba5-9fa2-4e2d-ac39-e62905543d84" (UID: "09f4aba5-9fa2-4e2d-ac39-e62905543d84"). InnerVolumeSpecName "kube-api-access-j8z9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.334715 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e91c97aa-9ab9-47e6-9821-22ee20dff312-kube-api-access-glbbf" (OuterVolumeSpecName: "kube-api-access-glbbf") pod "e91c97aa-9ab9-47e6-9821-22ee20dff312" (UID: "e91c97aa-9ab9-47e6-9821-22ee20dff312"). InnerVolumeSpecName "kube-api-access-glbbf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.407582 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "e91c97aa-9ab9-47e6-9821-22ee20dff312" (UID: "e91c97aa-9ab9-47e6-9821-22ee20dff312"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.410928 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.410975 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411037 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411051 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411064 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e91c97aa-9ab9-47e6-9821-22ee20dff312-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411077 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411091 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qptsp\" (UniqueName: \"kubernetes.io/projected/aa154084-240a-486e-9eb1-21620d97ec8d-kube-api-access-qptsp\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411103 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411114 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8z9m\" (UniqueName: \"kubernetes.io/projected/09f4aba5-9fa2-4e2d-ac39-e62905543d84-kube-api-access-j8z9m\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411126 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f4aba5-9fa2-4e2d-ac39-e62905543d84-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.411137 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glbbf\" (UniqueName: \"kubernetes.io/projected/e91c97aa-9ab9-47e6-9821-22ee20dff312-kube-api-access-glbbf\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.434162 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerName="galera" containerID="cri-o://b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8" gracePeriod=30 Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.437796 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-77bffb9b6f-5z6t5"] Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.438020 4787 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-httpd" containerID="cri-o://590a7dfe28927af5962f958eb03c0de73f3909c4b35a1e96d83e4eb1b3065948" gracePeriod=30 Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.438154 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-server" containerID="cri-o://0486530403f0ac601abe5de0a5af2b6b1794e8ce8171f91595d95c8ce17b47c4" gracePeriod=30 Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.461593 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e91c97aa-9ab9-47e6-9821-22ee20dff312" (UID: "e91c97aa-9ab9-47e6-9821-22ee20dff312"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.470514 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.484905 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-config" (OuterVolumeSpecName: "config") pod "aa154084-240a-486e-9eb1-21620d97ec8d" (UID: "aa154084-240a-486e-9eb1-21620d97ec8d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.485140 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aa154084-240a-486e-9eb1-21620d97ec8d" (UID: "aa154084-240a-486e-9eb1-21620d97ec8d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.492589 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "09f4aba5-9fa2-4e2d-ac39-e62905543d84" (UID: "09f4aba5-9fa2-4e2d-ac39-e62905543d84"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.502716 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aa154084-240a-486e-9eb1-21620d97ec8d" (UID: "aa154084-240a-486e-9eb1-21620d97ec8d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.503316 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09f4aba5-9fa2-4e2d-ac39-e62905543d84" (UID: "09f4aba5-9fa2-4e2d-ac39-e62905543d84"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.513652 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.513694 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.513707 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.513718 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.513729 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.513741 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.513751 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.537591 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "e91c97aa-9ab9-47e6-9821-22ee20dff312" (UID: "e91c97aa-9ab9-47e6-9821-22ee20dff312"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.542837 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.571889 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "aa154084-240a-486e-9eb1-21620d97ec8d" (UID: "aa154084-240a-486e-9eb1-21620d97ec8d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.581558 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aa154084-240a-486e-9eb1-21620d97ec8d" (UID: "aa154084-240a-486e-9eb1-21620d97ec8d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.616763 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.616799 4787 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.616812 4787 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa154084-240a-486e-9eb1-21620d97ec8d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.616822 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.616912 4787 secret.go:188] Couldn't get secret openstack/nova-cell1-conductor-config-data: secret "nova-cell1-conductor-config-data" not found Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.617354 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "09f4aba5-9fa2-4e2d-ac39-e62905543d84" (UID: "09f4aba5-9fa2-4e2d-ac39-e62905543d84"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.617407 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data podName:f73803d0-ec9b-4483-a509-7bff9afb1d85 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:53.617387957 +0000 UTC m=+1492.378648233 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data") pod "nova-cell1-conductor-0" (UID: "f73803d0-ec9b-4483-a509-7bff9afb1d85") : secret "nova-cell1-conductor-config-data" not found Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.647568 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wpb6r_1a18d2e9-35be-4d8f-9d13-08296cfa2963/openstack-network-exporter/0.log" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.647648 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.661553 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "e91c97aa-9ab9-47e6-9821-22ee20dff312" (UID: "e91c97aa-9ab9-47e6-9821-22ee20dff312"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.670717 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.701730 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-e093-account-create-update-dlpwc"] Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.707115 4787 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 13:40:52 crc kubenswrapper[4787]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: if [ -n "glance" ]; then Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="glance" Jan 29 13:40:52 crc kubenswrapper[4787]: else Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="*" Jan 29 13:40:52 crc kubenswrapper[4787]: fi Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: # going for maximum compatibility here: Jan 29 13:40:52 crc kubenswrapper[4787]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 13:40:52 crc kubenswrapper[4787]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 13:40:52 crc kubenswrapper[4787]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 13:40:52 crc kubenswrapper[4787]: # support updates Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: $MYSQL_CMD < logger="UnhandledError" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.707521 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.708910 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"glance-db-secret\\\" not found\"" pod="openstack/glance-e093-account-create-update-dlpwc" podUID="05e445c0-f43c-4cc7-854d-fa9bfe184d2e" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.722241 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-scripts\") pod \"d9df3779-71e8-4441-a410-d4fe2fb2267e\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.722292 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snp6f\" (UniqueName: \"kubernetes.io/projected/d9df3779-71e8-4441-a410-d4fe2fb2267e-kube-api-access-snp6f\") pod \"d9df3779-71e8-4441-a410-d4fe2fb2267e\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723098 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msd4x\" (UniqueName: \"kubernetes.io/projected/1a18d2e9-35be-4d8f-9d13-08296cfa2963-kube-api-access-msd4x\") pod \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723172 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config\") pod \"7f67df75-67c0-4609-9afe-caa099a5ad1e\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723212 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dbjf\" (UniqueName: \"kubernetes.io/projected/7f67df75-67c0-4609-9afe-caa099a5ad1e-kube-api-access-2dbjf\") pod \"7f67df75-67c0-4609-9afe-caa099a5ad1e\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723261 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-metrics-certs-tls-certs\") pod \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723303 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a18d2e9-35be-4d8f-9d13-08296cfa2963-config\") pod \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723329 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config-secret\") pod \"7f67df75-67c0-4609-9afe-caa099a5ad1e\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723376 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-combined-ca-bundle\") pod \"7f67df75-67c0-4609-9afe-caa099a5ad1e\" (UID: \"7f67df75-67c0-4609-9afe-caa099a5ad1e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723428 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data\") pod \"d9df3779-71e8-4441-a410-d4fe2fb2267e\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723475 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovn-rundir\") pod \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723494 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data-custom\") pod \"d9df3779-71e8-4441-a410-d4fe2fb2267e\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723513 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-combined-ca-bundle\") pod \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723541 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9df3779-71e8-4441-a410-d4fe2fb2267e-etc-machine-id\") pod \"d9df3779-71e8-4441-a410-d4fe2fb2267e\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723568 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovs-rundir\") pod \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\" (UID: \"1a18d2e9-35be-4d8f-9d13-08296cfa2963\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.723625 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-combined-ca-bundle\") pod \"d9df3779-71e8-4441-a410-d4fe2fb2267e\" (UID: \"d9df3779-71e8-4441-a410-d4fe2fb2267e\") " Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.724326 4787 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/09f4aba5-9fa2-4e2d-ac39-e62905543d84-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.724346 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e91c97aa-9ab9-47e6-9821-22ee20dff312-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.726834 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d9df3779-71e8-4441-a410-d4fe2fb2267e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d9df3779-71e8-4441-a410-d4fe2fb2267e" (UID: 
"d9df3779-71e8-4441-a410-d4fe2fb2267e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.728811 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "1a18d2e9-35be-4d8f-9d13-08296cfa2963" (UID: "1a18d2e9-35be-4d8f-9d13-08296cfa2963"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.730560 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "1a18d2e9-35be-4d8f-9d13-08296cfa2963" (UID: "1a18d2e9-35be-4d8f-9d13-08296cfa2963"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.731497 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a18d2e9-35be-4d8f-9d13-08296cfa2963-config" (OuterVolumeSpecName: "config") pod "1a18d2e9-35be-4d8f-9d13-08296cfa2963" (UID: "1a18d2e9-35be-4d8f-9d13-08296cfa2963"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.732302 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f67df75-67c0-4609-9afe-caa099a5ad1e-kube-api-access-2dbjf" (OuterVolumeSpecName: "kube-api-access-2dbjf") pod "7f67df75-67c0-4609-9afe-caa099a5ad1e" (UID: "7f67df75-67c0-4609-9afe-caa099a5ad1e"). InnerVolumeSpecName "kube-api-access-2dbjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.737755 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-e613-account-create-update-mrxwq"] Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.747020 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-fedf-account-create-update-5g5mh"] Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.748667 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9df3779-71e8-4441-a410-d4fe2fb2267e-kube-api-access-snp6f" (OuterVolumeSpecName: "kube-api-access-snp6f") pod "d9df3779-71e8-4441-a410-d4fe2fb2267e" (UID: "d9df3779-71e8-4441-a410-d4fe2fb2267e"). InnerVolumeSpecName "kube-api-access-snp6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.748835 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-scripts" (OuterVolumeSpecName: "scripts") pod "d9df3779-71e8-4441-a410-d4fe2fb2267e" (UID: "d9df3779-71e8-4441-a410-d4fe2fb2267e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.751985 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d9df3779-71e8-4441-a410-d4fe2fb2267e" (UID: "d9df3779-71e8-4441-a410-d4fe2fb2267e"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.752770 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a18d2e9-35be-4d8f-9d13-08296cfa2963-kube-api-access-msd4x" (OuterVolumeSpecName: "kube-api-access-msd4x") pod "1a18d2e9-35be-4d8f-9d13-08296cfa2963" (UID: "1a18d2e9-35be-4d8f-9d13-08296cfa2963"). InnerVolumeSpecName "kube-api-access-msd4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.795989 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "7f67df75-67c0-4609-9afe-caa099a5ad1e" (UID: "7f67df75-67c0-4609-9afe-caa099a5ad1e"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.799958 4787 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 13:40:52 crc kubenswrapper[4787]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: if [ -n "nova_api" ]; then Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="nova_api" Jan 29 13:40:52 crc kubenswrapper[4787]: else Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="*" Jan 29 13:40:52 crc kubenswrapper[4787]: fi Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: # going for maximum compatibility here: Jan 29 13:40:52 crc kubenswrapper[4787]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 13:40:52 crc kubenswrapper[4787]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 13:40:52 crc kubenswrapper[4787]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 29 13:40:52 crc kubenswrapper[4787]: # support updates Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: $MYSQL_CMD < logger="UnhandledError" Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.804684 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-e613-account-create-update-mrxwq" podUID="6cba6e64-0710-4f92-aca6-f141b4ebcaea" Jan 29 13:40:52 crc kubenswrapper[4787]: W0129 13:40:52.813299 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6256c29_7af4_4921_b494_ef3a2e5e976f.slice/crio-206b081694b0afda3d931e1379b09aa6ce2805fe70e43da1379d9173eb1ab621 WatchSource:0}: Error finding container 206b081694b0afda3d931e1379b09aa6ce2805fe70e43da1379d9173eb1ab621: Status 404 returned error can't find the container with id 206b081694b0afda3d931e1379b09aa6ce2805fe70e43da1379d9173eb1ab621 Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.819470 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a18d2e9-35be-4d8f-9d13-08296cfa2963" (UID: "1a18d2e9-35be-4d8f-9d13-08296cfa2963"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853796 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853826 4787 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853835 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853844 4787 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d9df3779-71e8-4441-a410-d4fe2fb2267e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853852 4787 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1a18d2e9-35be-4d8f-9d13-08296cfa2963-ovs-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853860 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853868 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snp6f\" (UniqueName: \"kubernetes.io/projected/d9df3779-71e8-4441-a410-d4fe2fb2267e-kube-api-access-snp6f\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853877 4787 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msd4x\" (UniqueName: \"kubernetes.io/projected/1a18d2e9-35be-4d8f-9d13-08296cfa2963-kube-api-access-msd4x\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853888 4787 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853896 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dbjf\" (UniqueName: \"kubernetes.io/projected/7f67df75-67c0-4609-9afe-caa099a5ad1e-kube-api-access-2dbjf\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.853904 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a18d2e9-35be-4d8f-9d13-08296cfa2963-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.854750 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "7f67df75-67c0-4609-9afe-caa099a5ad1e" (UID: "7f67df75-67c0-4609-9afe-caa099a5ad1e"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.859117 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "1a18d2e9-35be-4d8f-9d13-08296cfa2963" (UID: "1a18d2e9-35be-4d8f-9d13-08296cfa2963"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.860218 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-h9g5j"] Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.860370 4787 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 13:40:52 crc kubenswrapper[4787]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: if [ -n "cinder" ]; then Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="cinder" Jan 29 13:40:52 crc kubenswrapper[4787]: else Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="*" Jan 29 13:40:52 crc kubenswrapper[4787]: fi Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: # going for maximum compatibility here: Jan 29 13:40:52 crc kubenswrapper[4787]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 13:40:52 crc kubenswrapper[4787]: # 2. 
MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 13:40:52 crc kubenswrapper[4787]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 13:40:52 crc kubenswrapper[4787]: # support updates Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: $MYSQL_CMD < logger="UnhandledError" Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.861515 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-fedf-account-create-update-5g5mh" podUID="e6256c29-7af4-4921-b494-ef3a2e5e976f" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.875898 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9df3779-71e8-4441-a410-d4fe2fb2267e" (UID: "d9df3779-71e8-4441-a410-d4fe2fb2267e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.883332 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-xzhzd"] Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.885959 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7f67df75-67c0-4609-9afe-caa099a5ad1e" (UID: "7f67df75-67c0-4609-9afe-caa099a5ad1e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:52 crc kubenswrapper[4787]: W0129 13:40:52.894389 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05668608_1ac5_4376_b0ef_0ec5604136a1.slice/crio-6c4824ef480b1b5baa587a0cb20d038af7e99e91caf8712fd86b6e60b5c40b26 WatchSource:0}: Error finding container 6c4824ef480b1b5baa587a0cb20d038af7e99e91caf8712fd86b6e60b5c40b26: Status 404 returned error can't find the container with id 6c4824ef480b1b5baa587a0cb20d038af7e99e91caf8712fd86b6e60b5c40b26 Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.898188 4787 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 13:40:52 crc kubenswrapper[4787]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: if [ -n "nova_cell0" ]; then Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="nova_cell0" Jan 29 13:40:52 crc kubenswrapper[4787]: else Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="*" Jan 29 13:40:52 crc kubenswrapper[4787]: fi Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc 
kubenswrapper[4787]: # going for maximum compatibility here: Jan 29 13:40:52 crc kubenswrapper[4787]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 13:40:52 crc kubenswrapper[4787]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 13:40:52 crc kubenswrapper[4787]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 13:40:52 crc kubenswrapper[4787]: # support updates Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: $MYSQL_CMD < logger="UnhandledError" Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.899549 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell0-db-secret\\\" not found\"" pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" podUID="05668608-1ac5-4376-b0ef-0ec5604136a1" Jan 29 13:40:52 crc kubenswrapper[4787]: W0129 13:40:52.900875 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69319341_ad07_4fcc_a65a_585f389382ab.slice/crio-0b4e40173f801cf3cca2b72fb97793e0e61315eb717127f50f6c1782655c21a5 WatchSource:0}: Error finding container 0b4e40173f801cf3cca2b72fb97793e0e61315eb717127f50f6c1782655c21a5: Status 404 returned error can't find the container with id 0b4e40173f801cf3cca2b72fb97793e0e61315eb717127f50f6c1782655c21a5 Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.918866 4787 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 13:40:52 crc kubenswrapper[4787]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: if [ -n "nova_cell1" ]; then Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="nova_cell1" Jan 29 13:40:52 crc kubenswrapper[4787]: else Jan 29 13:40:52 crc kubenswrapper[4787]: GRANT_DATABASE="*" Jan 29 13:40:52 crc kubenswrapper[4787]: fi Jan 29 13:40:52 crc kubenswrapper[4787]: Jan 29 13:40:52 crc kubenswrapper[4787]: # going for maximum compatibility here: Jan 29 13:40:52 crc kubenswrapper[4787]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 13:40:52 crc kubenswrapper[4787]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 13:40:52 crc kubenswrapper[4787]: # 3. 
Jan 29 13:40:52 crc kubenswrapper[4787]: E0129 13:40:52.920073 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell1-db-secret\\\" not found\"" pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" podUID="69319341-ad07-4fcc-a65a-585f389382ab"
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.958785 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.958811 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a18d2e9-35be-4d8f-9d13-08296cfa2963-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.958822 4787 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.958830 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f67df75-67c0-4609-9afe-caa099a5ad1e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:52 crc kubenswrapper[4787]: I0129 13:40:52.965740 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data" (OuterVolumeSpecName: "config-data") pod "d9df3779-71e8-4441-a410-d4fe2fb2267e" (UID: "d9df3779-71e8-4441-a410-d4fe2fb2267e"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.039719 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.045816 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.050145 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.050212 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="ovn-northd" Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.053279 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.056385 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.057919 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.057988 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerName="galera" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.060661 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9df3779-71e8-4441-a410-d4fe2fb2267e-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.089877 4787 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.162726 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-nova-novncproxy-tls-certs\") pod \"ab278964-ff72-4353-b454-9587f235c492\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.162821 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-config-data\") pod \"ab278964-ff72-4353-b454-9587f235c492\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.162879 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-vencrypt-tls-certs\") pod \"ab278964-ff72-4353-b454-9587f235c492\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.162904 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-combined-ca-bundle\") pod \"ab278964-ff72-4353-b454-9587f235c492\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.162937 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sds94\" (UniqueName: \"kubernetes.io/projected/ab278964-ff72-4353-b454-9587f235c492-kube-api-access-sds94\") pod \"ab278964-ff72-4353-b454-9587f235c492\" (UID: \"ab278964-ff72-4353-b454-9587f235c492\") " Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.163428 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.163506 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data podName:6285155e-2d1b-4c6f-be33-5f2681a7b5e0 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:57.163487941 +0000 UTC m=+1495.924748217 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data") pod "rabbitmq-cell1-server-0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0") : configmap "rabbitmq-cell1-config-data" not found Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.163879 4787 generic.go:334] "Generic (PLEG): container finished" podID="52053f33-608f-4f1e-9432-baece90d08fb" containerID="2b304ab8c2c786d238ef41d0439f6a9dcc42e20c02b9de41cde375bfb43bf8a1" exitCode=0 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.164055 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"52053f33-608f-4f1e-9432-baece90d08fb","Type":"ContainerDied","Data":"2b304ab8c2c786d238ef41d0439f6a9dcc42e20c02b9de41cde375bfb43bf8a1"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.167063 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab278964-ff72-4353-b454-9587f235c492-kube-api-access-sds94" (OuterVolumeSpecName: "kube-api-access-sds94") pod "ab278964-ff72-4353-b454-9587f235c492" (UID: "ab278964-ff72-4353-b454-9587f235c492"). InnerVolumeSpecName "kube-api-access-sds94". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.179555 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e093-account-create-update-dlpwc" event={"ID":"05e445c0-f43c-4cc7-854d-fa9bfe184d2e","Type":"ContainerStarted","Data":"5e449393cbee7259a3444b79aecb02ff40fafb05e7eb788367f9bcf41d77d2c1"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.184030 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e613-account-create-update-mrxwq" event={"ID":"6cba6e64-0710-4f92-aca6-f141b4ebcaea","Type":"ContainerStarted","Data":"f2c9cfa02f2ac116bd49ab90325cc22bca1cedcd024e432fd53489effb8cff09"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.192133 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ab278964-ff72-4353-b454-9587f235c492" (UID: "ab278964-ff72-4353-b454-9587f235c492"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.197749 4787 scope.go:117] "RemoveContainer" containerID="c36704626e89b3205c40733c06c606033c54a72189b14d4d21e965adaeaac743" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.197903 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.221174 4787 generic.go:334] "Generic (PLEG): container finished" podID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerID="0486530403f0ac601abe5de0a5af2b6b1794e8ce8171f91595d95c8ce17b47c4" exitCode=0 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.221209 4787 generic.go:334] "Generic (PLEG): container finished" podID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerID="590a7dfe28927af5962f958eb03c0de73f3909c4b35a1e96d83e4eb1b3065948" exitCode=0 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.221252 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" event={"ID":"ec204729-6346-4c3a-a479-2a2aa58eb3bc","Type":"ContainerDied","Data":"0486530403f0ac601abe5de0a5af2b6b1794e8ce8171f91595d95c8ce17b47c4"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.221276 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" event={"ID":"ec204729-6346-4c3a-a479-2a2aa58eb3bc","Type":"ContainerDied","Data":"590a7dfe28927af5962f958eb03c0de73f3909c4b35a1e96d83e4eb1b3065948"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.229662 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-config-data" (OuterVolumeSpecName: "config-data") pod "ab278964-ff72-4353-b454-9587f235c492" (UID: "ab278964-ff72-4353-b454-9587f235c492"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.246776 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-70cd-account-create-update-pns45" event={"ID":"ac650857-a714-4612-90b0-a8dada6949bb","Type":"ContainerStarted","Data":"60e2e0aa0fd318196d493910f6a0489b4e9b9b6f3924b219b9c47dcaaa6f833d"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.257880 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "ab278964-ff72-4353-b454-9587f235c492" (UID: "ab278964-ff72-4353-b454-9587f235c492"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.268754 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sds94\" (UniqueName: \"kubernetes.io/projected/ab278964-ff72-4353-b454-9587f235c492-kube-api-access-sds94\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.268784 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.268793 4787 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.268802 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.274726 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "ab278964-ff72-4353-b454-9587f235c492" (UID: "ab278964-ff72-4353-b454-9587f235c492"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.338808 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_e91c97aa-9ab9-47e6-9821-22ee20dff312/ovsdbserver-sb/0.log" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.338881 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"e91c97aa-9ab9-47e6-9821-22ee20dff312","Type":"ContainerDied","Data":"8cdaa16f73cf5fdfa4959221a0456ebdfb93648708bc35db169fadd941dd3c46"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.338974 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.356705 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" event={"ID":"05668608-1ac5-4376-b0ef-0ec5604136a1","Type":"ContainerStarted","Data":"6c4824ef480b1b5baa587a0cb20d038af7e99e91caf8712fd86b6e60b5c40b26"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.367295 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_09f4aba5-9fa2-4e2d-ac39-e62905543d84/ovsdbserver-nb/0.log" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.367557 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"09f4aba5-9fa2-4e2d-ac39-e62905543d84","Type":"ContainerDied","Data":"dd14c5768d74958787edaa836d7c2b1953728c25d91145ffdb2c08ce3af53bdf"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.367724 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.370263 4787 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab278964-ff72-4353-b454-9587f235c492-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.400223 4787 generic.go:334] "Generic (PLEG): container finished" podID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerID="7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34" exitCode=0 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.400399 4787 generic.go:334] "Generic (PLEG): container finished" podID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerID="70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971" exitCode=0 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.400568 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9df3779-71e8-4441-a410-d4fe2fb2267e","Type":"ContainerDied","Data":"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.400643 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9df3779-71e8-4441-a410-d4fe2fb2267e","Type":"ContainerDied","Data":"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.400695 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d9df3779-71e8-4441-a410-d4fe2fb2267e","Type":"ContainerDied","Data":"6f3bf64a03ab83f7f7c99e9c24292e51e46c181bb084e6c70f19e1f08178c01e"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.400803 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.411931 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" event={"ID":"69319341-ad07-4fcc-a65a-585f389382ab","Type":"ContainerStarted","Data":"0b4e40173f801cf3cca2b72fb97793e0e61315eb717127f50f6c1782655c21a5"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.420385 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" event={"ID":"aa154084-240a-486e-9eb1-21620d97ec8d","Type":"ContainerDied","Data":"fe52aaca207e8bab15c96c8b86d9d27b7a3d6136026caf80f3401544f646b01f"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.420663 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-867cd545c7-4q8zf" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.424363 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fedf-account-create-update-5g5mh" event={"ID":"e6256c29-7af4-4921-b494-ef3a2e5e976f","Type":"ContainerStarted","Data":"206b081694b0afda3d931e1379b09aa6ce2805fe70e43da1379d9173eb1ab621"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.438433 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wpb6r_1a18d2e9-35be-4d8f-9d13-08296cfa2963/openstack-network-exporter/0.log" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.438536 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wpb6r" event={"ID":"1a18d2e9-35be-4d8f-9d13-08296cfa2963","Type":"ContainerDied","Data":"44a951af89e5870f79973e69b646b34901416c5ba1ac974713e2e57603818127"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.438613 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wpb6r" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.447837 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8qnjj" event={"ID":"6caca38d-9421-4a65-8e5e-ddc0343460c2","Type":"ContainerStarted","Data":"7417f0da9084cdbf95b5da986c8c1337229223aa4ccbfc9a59d44c79409e0077"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.478698 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.478696 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ab278964-ff72-4353-b454-9587f235c492","Type":"ContainerDied","Data":"a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.479064 4787 generic.go:334] "Generic (PLEG): container finished" podID="ab278964-ff72-4353-b454-9587f235c492" containerID="a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9" exitCode=0 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.479161 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ab278964-ff72-4353-b454-9587f235c492","Type":"ContainerDied","Data":"3a40a7418a1a2b18aade4acbc66b2daaae6cb310e1db6ef891ea602f4149f10c"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.487802 4787 generic.go:334] "Generic (PLEG): container finished" podID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerID="8c54aa6658f396298b1bba542f62e319b2dd09d1a2963008ba09c2366a51988b" exitCode=143 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.487862 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b9df2172-145d-4edd-8d1c-7cc6768840bb","Type":"ContainerDied","Data":"8c54aa6658f396298b1bba542f62e319b2dd09d1a2963008ba09c2366a51988b"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.498145 4787 generic.go:334] "Generic (PLEG): container finished" podID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerID="221715efc1d70075a201ff1b336d7ca967d74dcf9c3ba5c93e0689478777ecd0" exitCode=143 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.498782 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="f73803d0-ec9b-4483-a509-7bff9afb1d85" 
containerName="nova-cell1-conductor-conductor" containerID="cri-o://8176461a98299b76193219bfaaced8b08f48e5e770c550736d32af3741ce5884" gracePeriod=30 Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.499204 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93f58b7a-13c3-49ef-8c78-a5931438cba6","Type":"ContainerDied","Data":"221715efc1d70075a201ff1b336d7ca967d74dcf9c3ba5c93e0689478777ecd0"} Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.633814 4787 scope.go:117] "RemoveContainer" containerID="dc37f2e0d9cdd587ea0cfeec9b06226a2300a64f38c57be961c897d05d7498a1" Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.635526 4787 secret.go:188] Couldn't get secret openstack/nova-cell1-conductor-config-data: secret "nova-cell1-conductor-config-data" not found Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.635911 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530 is running failed: container process not found" containerID="017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.645711 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data podName:f73803d0-ec9b-4483-a509-7bff9afb1d85 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:55.643659181 +0000 UTC m=+1494.404919457 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data") pod "nova-cell1-conductor-0" (UID: "f73803d0-ec9b-4483-a509-7bff9afb1d85") : secret "nova-cell1-conductor-config-data" not found Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.646021 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530 is running failed: container process not found" containerID="017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.646119 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.647824 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530 is running failed: container process not found" containerID="017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.647928 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-hz6gf" podUID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" containerName="ovn-controller" Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 
13:40:53.656536 4787 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Jan 29 13:40:53 crc kubenswrapper[4787]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-01-29T13:40:51Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Jan 29 13:40:53 crc kubenswrapper[4787]: /etc/init.d/functions: line 589: 407 Alarm clock "$@"
Jan 29 13:40:53 crc kubenswrapper[4787]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-hz6gf" message=<
Jan 29 13:40:53 crc kubenswrapper[4787]: Exiting ovn-controller (1) [FAILED]
Jan 29 13:40:53 crc kubenswrapper[4787]: Killing ovn-controller (1) [ OK ]
Jan 29 13:40:53 crc kubenswrapper[4787]: 2026-01-29T13:40:51Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Jan 29 13:40:53 crc kubenswrapper[4787]: /etc/init.d/functions: line 589: 407 Alarm clock "$@"
Jan 29 13:40:53 crc kubenswrapper[4787]: >
Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.656573 4787 kuberuntime_container.go:691] "PreStop hook failed" err=<
Jan 29 13:40:53 crc kubenswrapper[4787]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2026-01-29T13:40:51Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Jan 29 13:40:53 crc kubenswrapper[4787]: /etc/init.d/functions: line 589: 407 Alarm clock "$@"
Jan 29 13:40:53 crc kubenswrapper[4787]: > pod="openstack/ovn-controller-hz6gf" podUID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" containerName="ovn-controller" containerID="cri-o://017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530"
Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.656612 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-hz6gf" podUID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" containerName="ovn-controller" containerID="cri-o://017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530" gracePeriod=27
Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.666846 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.673009 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.673355 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.673693 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22"
cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.673792 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.674116 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.675215 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.677444 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.677828 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:40:53 crc kubenswrapper[4787]: E0129 13:40:53.677918 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.703284 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-4q8zf"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.703336 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-867cd545c7-4q8zf"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.713820 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.722217 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.727990 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.743740 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.748117 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-wpb6r"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.760848 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-wpb6r"] Jan 29 13:40:53 crc 
kubenswrapper[4787]: I0129 13:40:53.762824 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.767439 4787 scope.go:117] "RemoveContainer" containerID="76a0d1cef4d08d59134316c082afa83d88d0e7e5d29677922505332f63613b80" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.771571 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.854166 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.861844 4787 scope.go:117] "RemoveContainer" containerID="898e2a89c97b0d73e3b5a788305880e3b3f59cb25679762400e781c5389d9cd4" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.897324 4787 scope.go:117] "RemoveContainer" containerID="043d0ca5869b624c5c827973b5831c73dd8054e384abffecf0ed9cf48cb278f0" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.946059 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-config-data\") pod \"52053f33-608f-4f1e-9432-baece90d08fb\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.946141 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zn9w\" (UniqueName: \"kubernetes.io/projected/52053f33-608f-4f1e-9432-baece90d08fb-kube-api-access-8zn9w\") pod \"52053f33-608f-4f1e-9432-baece90d08fb\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.946179 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-combined-ca-bundle\") pod \"52053f33-608f-4f1e-9432-baece90d08fb\" (UID: \"52053f33-608f-4f1e-9432-baece90d08fb\") " Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.953837 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52053f33-608f-4f1e-9432-baece90d08fb-kube-api-access-8zn9w" (OuterVolumeSpecName: "kube-api-access-8zn9w") pod "52053f33-608f-4f1e-9432-baece90d08fb" (UID: "52053f33-608f-4f1e-9432-baece90d08fb"). InnerVolumeSpecName "kube-api-access-8zn9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:53 crc kubenswrapper[4787]: I0129 13:40:53.974832 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-config-data" (OuterVolumeSpecName: "config-data") pod "52053f33-608f-4f1e-9432-baece90d08fb" (UID: "52053f33-608f-4f1e-9432-baece90d08fb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.003643 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52053f33-608f-4f1e-9432-baece90d08fb" (UID: "52053f33-608f-4f1e-9432-baece90d08fb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.004672 4787 scope.go:117] "RemoveContainer" containerID="7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.005811 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" path="/var/lib/kubelet/pods/09f4aba5-9fa2-4e2d-ac39-e62905543d84/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.006561 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0db5cf09-a05d-4ed0-b8b2-d84bad018d43" path="/var/lib/kubelet/pods/0db5cf09-a05d-4ed0-b8b2-d84bad018d43/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.007606 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a18d2e9-35be-4d8f-9d13-08296cfa2963" path="/var/lib/kubelet/pods/1a18d2e9-35be-4d8f-9d13-08296cfa2963/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.008673 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f67df75-67c0-4609-9afe-caa099a5ad1e" path="/var/lib/kubelet/pods/7f67df75-67c0-4609-9afe-caa099a5ad1e/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.009246 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91ee13c4-b950-4f37-8601-f05ab94d65f7" path="/var/lib/kubelet/pods/91ee13c4-b950-4f37-8601-f05ab94d65f7/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.010586 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" path="/var/lib/kubelet/pods/a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.011298 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa154084-240a-486e-9eb1-21620d97ec8d" path="/var/lib/kubelet/pods/aa154084-240a-486e-9eb1-21620d97ec8d/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.011946 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab278964-ff72-4353-b454-9587f235c492" path="/var/lib/kubelet/pods/ab278964-ff72-4353-b454-9587f235c492/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.012978 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" path="/var/lib/kubelet/pods/d9df3779-71e8-4441-a410-d4fe2fb2267e/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.013744 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" path="/var/lib/kubelet/pods/e91c97aa-9ab9-47e6-9821-22ee20dff312/volumes" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.033398 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.049179 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.049210 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zn9w\" (UniqueName: \"kubernetes.io/projected/52053f33-608f-4f1e-9432-baece90d08fb-kube-api-access-8zn9w\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.049220 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52053f33-608f-4f1e-9432-baece90d08fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.057253 4787 scope.go:117] "RemoveContainer" containerID="70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.057696 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.069432 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.069842 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.081200 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.086013 4787 scope.go:117] "RemoveContainer" containerID="7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.088079 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34\": container with ID starting with 7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34 not found: ID does not exist" containerID="7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.088179 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34"} err="failed to get container status \"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34\": rpc error: code = NotFound desc = could not find container \"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34\": container with ID starting with 7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34 not found: ID does not exist" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.088238 4787 scope.go:117] "RemoveContainer" containerID="70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.088578 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971\": container with ID starting with 70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971 not found: ID does not exist" containerID="70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.088608 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971"} err="failed to get container status \"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971\": rpc error: code = NotFound desc = could not find container \"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971\": container with ID starting with 70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971 not found: ID does not exist" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.088624 4787 scope.go:117] "RemoveContainer" containerID="7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.089066 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34"} err="failed to get container status \"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34\": rpc error: code = NotFound desc = could not find container \"7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34\": container with ID starting with 7161701d38f9fcde2a18a5b47de38aba0dde9cdb2f2c4e687a42b30b0da81d34 not found: ID does not exist" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.089101 4787 scope.go:117] "RemoveContainer" containerID="70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.089430 
4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971"} err="failed to get container status \"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971\": rpc error: code = NotFound desc = could not find container \"70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971\": container with ID starting with 70c54386cb50520c5ba561c578b02fd58a890eb466239449c145033f9877d971 not found: ID does not exist" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.089448 4787 scope.go:117] "RemoveContainer" containerID="740be74bb4087b9bcad6b9181de441c8a1b42db5c5831a84ddf16b1021efb323" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.151793 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-lsntj"] Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152114 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerName="ovsdbserver-nb" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152129 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerName="ovsdbserver-nb" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152143 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerName="mysql-bootstrap" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152150 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerName="mysql-bootstrap" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152158 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerName="galera" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152164 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerName="galera" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152170 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52053f33-608f-4f1e-9432-baece90d08fb" containerName="nova-cell0-conductor-conductor" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152177 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="52053f33-608f-4f1e-9432-baece90d08fb" containerName="nova-cell0-conductor-conductor" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152188 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa154084-240a-486e-9eb1-21620d97ec8d" containerName="init" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152193 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa154084-240a-486e-9eb1-21620d97ec8d" containerName="init" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152203 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-httpd" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152209 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-httpd" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152222 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-server" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152229 4787 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-server" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152243 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a18d2e9-35be-4d8f-9d13-08296cfa2963" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152248 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a18d2e9-35be-4d8f-9d13-08296cfa2963" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152258 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerName="probe" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152263 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerName="probe" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152274 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="extract-utilities" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152279 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="extract-utilities" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152290 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="ovsdbserver-sb" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152295 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="ovsdbserver-sb" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152307 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="extract-content" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152312 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="extract-content" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152321 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerName="cinder-scheduler" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152327 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerName="cinder-scheduler" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152336 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab278964-ff72-4353-b454-9587f235c492" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152342 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab278964-ff72-4353-b454-9587f235c492" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152353 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152359 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152372 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa154084-240a-486e-9eb1-21620d97ec8d" containerName="dnsmasq-dns" Jan 29 13:40:54 crc kubenswrapper[4787]: 
I0129 13:40:54.152379 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa154084-240a-486e-9eb1-21620d97ec8d" containerName="dnsmasq-dns" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152387 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152393 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.152400 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="registry-server" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152405 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="registry-server" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152614 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="ovsdbserver-sb" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152627 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="52053f33-608f-4f1e-9432-baece90d08fb" containerName="nova-cell0-conductor-conductor" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152635 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerName="ovsdbserver-nb" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152645 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-httpd" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152651 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerName="probe" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152674 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a18d2e9-35be-4d8f-9d13-08296cfa2963" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152682 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-server" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152694 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab278964-ff72-4353-b454-9587f235c492" containerName="nova-cell1-novncproxy-novncproxy" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152702 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="09f4aba5-9fa2-4e2d-ac39-e62905543d84" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152712 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerName="galera" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152720 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa154084-240a-486e-9eb1-21620d97ec8d" containerName="dnsmasq-dns" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152726 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2ab58f8-5b38-4eff-b2a9-f1c4a1090f8e" containerName="registry-server" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152737 4787 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="d9df3779-71e8-4441-a410-d4fe2fb2267e" containerName="cinder-scheduler" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.152745 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="e91c97aa-9ab9-47e6-9821-22ee20dff312" containerName="openstack-network-exporter" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.153263 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.158909 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-combined-ca-bundle\") pod \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.158956 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kolla-config\") pod \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.158977 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-galera-tls-certs\") pod \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159024 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-generated\") pod \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159057 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-operator-scripts\") pod \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\" (UID: \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159076 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-config-data\") pod \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159090 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-default\") pod \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159106 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-log-httpd\") pod \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159137 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4l65h\" (UniqueName: 
\"kubernetes.io/projected/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kube-api-access-4l65h\") pod \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159163 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-etc-swift\") pod \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159178 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph7vp\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-kube-api-access-ph7vp\") pod \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159197 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-internal-tls-certs\") pod \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159221 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-operator-scripts\") pod \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159245 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cba6e64-0710-4f92-aca6-f141b4ebcaea-operator-scripts\") pod \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\" (UID: \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159313 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bngph\" (UniqueName: \"kubernetes.io/projected/ac650857-a714-4612-90b0-a8dada6949bb-kube-api-access-bngph\") pod \"ac650857-a714-4612-90b0-a8dada6949bb\" (UID: \"ac650857-a714-4612-90b0-a8dada6949bb\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159335 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-run-httpd\") pod \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159358 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159386 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shwv7\" (UniqueName: \"kubernetes.io/projected/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-kube-api-access-shwv7\") pod \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\" (UID: \"05e445c0-f43c-4cc7-854d-fa9bfe184d2e\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159422 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-public-tls-certs\") pod \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\" (UID: \"ec204729-6346-4c3a-a479-2a2aa58eb3bc\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159470 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac650857-a714-4612-90b0-a8dada6949bb-operator-scripts\") pod \"ac650857-a714-4612-90b0-a8dada6949bb\" (UID: \"ac650857-a714-4612-90b0-a8dada6949bb\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159492 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-combined-ca-bundle\") pod \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\" (UID: \"bf2e6240-bb2e-45fc-b33f-3b54a718f136\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.159514 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trs8d\" (UniqueName: \"kubernetes.io/projected/6cba6e64-0710-4f92-aca6-f141b4ebcaea-kube-api-access-trs8d\") pod \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\" (UID: \"6cba6e64-0710-4f92-aca6-f141b4ebcaea\") " Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.161513 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "05e445c0-f43c-4cc7-854d-fa9bfe184d2e" (UID: "05e445c0-f43c-4cc7-854d-fa9bfe184d2e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.163100 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.164633 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-kube-api-access-ph7vp" (OuterVolumeSpecName: "kube-api-access-ph7vp") pod "ec204729-6346-4c3a-a479-2a2aa58eb3bc" (UID: "ec204729-6346-4c3a-a479-2a2aa58eb3bc"). InnerVolumeSpecName "kube-api-access-ph7vp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.166374 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ec204729-6346-4c3a-a479-2a2aa58eb3bc" (UID: "ec204729-6346-4c3a-a479-2a2aa58eb3bc"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.166849 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "bf2e6240-bb2e-45fc-b33f-3b54a718f136" (UID: "bf2e6240-bb2e-45fc-b33f-3b54a718f136"). InnerVolumeSpecName "config-data-default". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.167057 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "bf2e6240-bb2e-45fc-b33f-3b54a718f136" (UID: "bf2e6240-bb2e-45fc-b33f-3b54a718f136"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.167296 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bf2e6240-bb2e-45fc-b33f-3b54a718f136" (UID: "bf2e6240-bb2e-45fc-b33f-3b54a718f136"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.167956 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cba6e64-0710-4f92-aca6-f141b4ebcaea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6cba6e64-0710-4f92-aca6-f141b4ebcaea" (UID: "6cba6e64-0710-4f92-aca6-f141b4ebcaea"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.168149 4787 scope.go:117] "RemoveContainer" containerID="246d9c4ad6eb156d29b612e0dfe0c3ea5a66d4f35eaf032b14fcc0c513cef32c" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.168568 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ec204729-6346-4c3a-a479-2a2aa58eb3bc" (UID: "ec204729-6346-4c3a-a479-2a2aa58eb3bc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.168760 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "bf2e6240-bb2e-45fc-b33f-3b54a718f136" (UID: "bf2e6240-bb2e-45fc-b33f-3b54a718f136"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.175984 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac650857-a714-4612-90b0-a8dada6949bb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ac650857-a714-4612-90b0-a8dada6949bb" (UID: "ac650857-a714-4612-90b0-a8dada6949bb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.188986 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-lsntj"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.190658 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ec204729-6346-4c3a-a479-2a2aa58eb3bc" (UID: "ec204729-6346-4c3a-a479-2a2aa58eb3bc"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.190848 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kube-api-access-4l65h" (OuterVolumeSpecName: "kube-api-access-4l65h") pod "bf2e6240-bb2e-45fc-b33f-3b54a718f136" (UID: "bf2e6240-bb2e-45fc-b33f-3b54a718f136"). InnerVolumeSpecName "kube-api-access-4l65h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.203636 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cba6e64-0710-4f92-aca6-f141b4ebcaea-kube-api-access-trs8d" (OuterVolumeSpecName: "kube-api-access-trs8d") pod "6cba6e64-0710-4f92-aca6-f141b4ebcaea" (UID: "6cba6e64-0710-4f92-aca6-f141b4ebcaea"). InnerVolumeSpecName "kube-api-access-trs8d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.215712 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-kube-api-access-shwv7" (OuterVolumeSpecName: "kube-api-access-shwv7") pod "05e445c0-f43c-4cc7-854d-fa9bfe184d2e" (UID: "05e445c0-f43c-4cc7-854d-fa9bfe184d2e"). InnerVolumeSpecName "kube-api-access-shwv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.236801 4787 scope.go:117] "RemoveContainer" containerID="da4713d4a7d29b71f50eb1206c38223a024fe96ef15d57aa224e3edbf3ee1b4e" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261724 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/142aa03d-77a5-40d1-a99f-8d7d33338721-operator-scripts\") pod \"root-account-create-update-lsntj\" (UID: \"142aa03d-77a5-40d1-a99f-8d7d33338721\") " pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261762 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c92wl\" (UniqueName: \"kubernetes.io/projected/142aa03d-77a5-40d1-a99f-8d7d33338721-kube-api-access-c92wl\") pod \"root-account-create-update-lsntj\" (UID: \"142aa03d-77a5-40d1-a99f-8d7d33338721\") " pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261881 4787 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261892 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261903 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261911 4787 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 
29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261921 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261930 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4l65h\" (UniqueName: \"kubernetes.io/projected/bf2e6240-bb2e-45fc-b33f-3b54a718f136-kube-api-access-4l65h\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261938 4787 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261949 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ph7vp\" (UniqueName: \"kubernetes.io/projected/ec204729-6346-4c3a-a479-2a2aa58eb3bc-kube-api-access-ph7vp\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261958 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf2e6240-bb2e-45fc-b33f-3b54a718f136-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261968 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6cba6e64-0710-4f92-aca6-f141b4ebcaea-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261976 4787 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ec204729-6346-4c3a-a479-2a2aa58eb3bc-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261985 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shwv7\" (UniqueName: \"kubernetes.io/projected/05e445c0-f43c-4cc7-854d-fa9bfe184d2e-kube-api-access-shwv7\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.261993 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac650857-a714-4612-90b0-a8dada6949bb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.262002 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trs8d\" (UniqueName: \"kubernetes.io/projected/6cba6e64-0710-4f92-aca6-f141b4ebcaea-kube-api-access-trs8d\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.275557 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "mysql-db") pod "bf2e6240-bb2e-45fc-b33f-3b54a718f136" (UID: "bf2e6240-bb2e-45fc-b33f-3b54a718f136"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.276639 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac650857-a714-4612-90b0-a8dada6949bb-kube-api-access-bngph" (OuterVolumeSpecName: "kube-api-access-bngph") pod "ac650857-a714-4612-90b0-a8dada6949bb" (UID: "ac650857-a714-4612-90b0-a8dada6949bb"). 
InnerVolumeSpecName "kube-api-access-bngph". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.367703 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/142aa03d-77a5-40d1-a99f-8d7d33338721-operator-scripts\") pod \"root-account-create-update-lsntj\" (UID: \"142aa03d-77a5-40d1-a99f-8d7d33338721\") " pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.367769 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c92wl\" (UniqueName: \"kubernetes.io/projected/142aa03d-77a5-40d1-a99f-8d7d33338721-kube-api-access-c92wl\") pod \"root-account-create-update-lsntj\" (UID: \"142aa03d-77a5-40d1-a99f-8d7d33338721\") " pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.367869 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bngph\" (UniqueName: \"kubernetes.io/projected/ac650857-a714-4612-90b0-a8dada6949bb-kube-api-access-bngph\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.367897 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.369005 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.369059 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data podName:a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:58.369044872 +0000 UTC m=+1497.130305148 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data") pod "rabbitmq-server-0" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5") : configmap "rabbitmq-config-data" not found Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.369515 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/142aa03d-77a5-40d1-a99f-8d7d33338721-operator-scripts\") pod \"root-account-create-update-lsntj\" (UID: \"142aa03d-77a5-40d1-a99f-8d7d33338721\") " pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.400147 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c92wl\" (UniqueName: \"kubernetes.io/projected/142aa03d-77a5-40d1-a99f-8d7d33338721-kube-api-access-c92wl\") pod \"root-account-create-update-lsntj\" (UID: \"142aa03d-77a5-40d1-a99f-8d7d33338721\") " pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.473140 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.479694 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec204729-6346-4c3a-a479-2a2aa58eb3bc" (UID: "ec204729-6346-4c3a-a479-2a2aa58eb3bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.495555 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "bf2e6240-bb2e-45fc-b33f-3b54a718f136" (UID: "bf2e6240-bb2e-45fc-b33f-3b54a718f136"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.512765 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e613-account-create-update-mrxwq" event={"ID":"6cba6e64-0710-4f92-aca6-f141b4ebcaea","Type":"ContainerDied","Data":"f2c9cfa02f2ac116bd49ab90325cc22bca1cedcd024e432fd53489effb8cff09"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.512779 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e613-account-create-update-mrxwq" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.513138 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.515947 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-config-data" (OuterVolumeSpecName: "config-data") pod "ec204729-6346-4c3a-a479-2a2aa58eb3bc" (UID: "ec204729-6346-4c3a-a479-2a2aa58eb3bc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.571998 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.572034 4787 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.572043 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.572053 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.576838 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8176461a98299b76193219bfaaced8b08f48e5e770c550736d32af3741ce5884" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.578001 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8176461a98299b76193219bfaaced8b08f48e5e770c550736d32af3741ce5884" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.578803 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8176461a98299b76193219bfaaced8b08f48e5e770c550736d32af3741ce5884" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 29 13:40:54 crc kubenswrapper[4787]: E0129 13:40:54.578828 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="f73803d0-ec9b-4483-a509-7bff9afb1d85" containerName="nova-cell1-conductor-conductor" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.581735 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.581974 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="ceilometer-central-agent" containerID="cri-o://2ed40eb4eb2d3cdf467d437f74046a37ba0764a96809f702ca6b5a682ad85043" gracePeriod=30 Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.582304 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="proxy-httpd" containerID="cri-o://a8692568c18184c00d2da8446d64f8b935631ac2dbe63cd7cc1211e1a04eae5d" gracePeriod=30 Jan 29 13:40:54 crc kubenswrapper[4787]: 
I0129 13:40:54.582351 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="sg-core" containerID="cri-o://e36b1605a3b04ed9fbf874c7a5383659083ef860921f7d81eb3be81f257cfe63" gracePeriod=30 Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.582381 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="ceilometer-notification-agent" containerID="cri-o://ba639ee3ff7377d69eaf774b671ae0c100eaa7448c3c876737ed5dc86aecd94f" gracePeriod=30 Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.598767 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.599153 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" containerName="kube-state-metrics" containerID="cri-o://13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85" gracePeriod=30 Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.606246 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-hz6gf_383ed8f7-22dd-49b6-a932-6425cc62a6d1/ovn-controller/0.log" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.606295 4787 generic.go:334] "Generic (PLEG): container finished" podID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" containerID="017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530" exitCode=143 Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.606372 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf" event={"ID":"383ed8f7-22dd-49b6-a932-6425cc62a6d1","Type":"ContainerDied","Data":"017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.627653 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" event={"ID":"ec204729-6346-4c3a-a479-2a2aa58eb3bc","Type":"ContainerDied","Data":"341921462c367e30f02fe0fa0f1eff5bd72a7e8cc914ccf63e1d930dcb777abe"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.627759 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.631687 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ec204729-6346-4c3a-a479-2a2aa58eb3bc" (UID: "ec204729-6346-4c3a-a479-2a2aa58eb3bc"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.632529 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ec204729-6346-4c3a-a479-2a2aa58eb3bc" (UID: "ec204729-6346-4c3a-a479-2a2aa58eb3bc"). InnerVolumeSpecName "public-tls-certs". 
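A decoding note for the PLEG event above: ovn-controller finished with exitCode=143, which is 128 + 15 (SIGTERM), meaning the process shut down on the TERM signal delivered by the "Killing container with a grace period" (gracePeriod=30) calls; 137 would be 128 + 9 (SIGKILL), meaning the grace period expired and the runtime force-killed it. A tiny decoder:

package main

import "fmt"

// decode maps a container exit code to its cause: codes above 128 mean the
// process died from signal (code - 128).
func decode(exitCode int) string {
	if exitCode > 128 {
		return fmt.Sprintf("terminated by signal %d", exitCode-128)
	}
	return fmt.Sprintf("exited with status %d", exitCode)
}

func main() {
	fmt.Println(143, decode(143)) // SIGTERM: clean shutdown within the grace period
	fmt.Println(137, decode(137)) // SIGKILL: grace period expired
	fmt.Println(0, decode(0))     // normal exit
}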
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.644024 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"52053f33-608f-4f1e-9432-baece90d08fb","Type":"ContainerDied","Data":"d96a1798aec1e4f527b2f3cfd3ec91125a39e1f9e6326a7749f987b8d96dc8a4"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.644110 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.644738 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf2e6240-bb2e-45fc-b33f-3b54a718f136" (UID: "bf2e6240-bb2e-45fc-b33f-3b54a718f136"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.665611 4787 generic.go:334] "Generic (PLEG): container finished" podID="87eff82d-823f-44a9-b96b-fed35701c54b" containerID="1f0d6877829ebcf7be918239787102e0a2f16c103fadf03c565be16af5f1f03a" exitCode=0 Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.665671 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-69d8bc6c98-vmd8w" event={"ID":"87eff82d-823f-44a9-b96b-fed35701c54b","Type":"ContainerDied","Data":"1f0d6877829ebcf7be918239787102e0a2f16c103fadf03c565be16af5f1f03a"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.667805 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" event={"ID":"69319341-ad07-4fcc-a65a-585f389382ab","Type":"ContainerDied","Data":"0b4e40173f801cf3cca2b72fb97793e0e61315eb717127f50f6c1782655c21a5"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.667826 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b4e40173f801cf3cca2b72fb97793e0e61315eb717127f50f6c1782655c21a5" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.673419 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.673471 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf2e6240-bb2e-45fc-b33f-3b54a718f136-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.673486 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec204729-6346-4c3a-a479-2a2aa58eb3bc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.701963 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8qnjj" event={"ID":"6caca38d-9421-4a65-8e5e-ddc0343460c2","Type":"ContainerDied","Data":"7417f0da9084cdbf95b5da986c8c1337229223aa4ccbfc9a59d44c79409e0077"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.702008 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7417f0da9084cdbf95b5da986c8c1337229223aa4ccbfc9a59d44c79409e0077" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.727688 4787 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" event={"ID":"05668608-1ac5-4376-b0ef-0ec5604136a1","Type":"ContainerDied","Data":"6c4824ef480b1b5baa587a0cb20d038af7e99e91caf8712fd86b6e60b5c40b26"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.727733 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c4824ef480b1b5baa587a0cb20d038af7e99e91caf8712fd86b6e60b5c40b26" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.751580 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dee3-account-create-update-zj75j"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.758704 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.758899 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="d95df36d-a737-4136-8921-01fe4e028add" containerName="memcached" containerID="cri-o://2e159ae76f0bb63f2124cb8a5615db9a9eac4b38c6806f2dcf9a137b01700373" gracePeriod=30 Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.762776 4787 generic.go:334] "Generic (PLEG): container finished" podID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" containerID="b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8" exitCode=0 Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.762897 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"bf2e6240-bb2e-45fc-b33f-3b54a718f136","Type":"ContainerDied","Data":"b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.762924 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"bf2e6240-bb2e-45fc-b33f-3b54a718f136","Type":"ContainerDied","Data":"694b58bc8dcf98befd85af5928ae9cc780331b3fd586819fe23110c2a890d891"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.763050 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.789880 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-dee3-account-create-update-zj75j"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.801260 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-dee3-account-create-update-74nmg"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.819945 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dee3-account-create-update-74nmg"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.819979 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-fedf-account-create-update-5g5mh" event={"ID":"e6256c29-7af4-4921-b494-ef3a2e5e976f","Type":"ContainerDied","Data":"206b081694b0afda3d931e1379b09aa6ce2805fe70e43da1379d9173eb1ab621"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.820000 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="206b081694b0afda3d931e1379b09aa6ce2805fe70e43da1379d9173eb1ab621" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.820080 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-dee3-account-create-update-74nmg" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.825501 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.839271 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-e093-account-create-update-dlpwc" event={"ID":"05e445c0-f43c-4cc7-854d-fa9bfe184d2e","Type":"ContainerDied","Data":"5e449393cbee7259a3444b79aecb02ff40fafb05e7eb788367f9bcf41d77d2c1"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.839377 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-e093-account-create-update-dlpwc" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.865786 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-l4wkc"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.880215 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-zlc9h"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.887193 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-70cd-account-create-update-pns45" event={"ID":"ac650857-a714-4612-90b0-a8dada6949bb","Type":"ContainerDied","Data":"60e2e0aa0fd318196d493910f6a0489b4e9b9b6f3924b219b9c47dcaaa6f833d"} Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.887294 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-70cd-account-create-update-pns45" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.903932 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts\") pod \"keystone-dee3-account-create-update-74nmg\" (UID: \"be423fe7-4020-41fe-90fc-bd42f5cec5db\") " pod="openstack/keystone-dee3-account-create-update-74nmg" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.904275 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm7bf\" (UniqueName: \"kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf\") pod \"keystone-dee3-account-create-update-74nmg\" (UID: \"be423fe7-4020-41fe-90fc-bd42f5cec5db\") " pod="openstack/keystone-dee3-account-create-update-74nmg" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.924405 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-l4wkc"] Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.952115 4787 scope.go:117] "RemoveContainer" containerID="a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9" Jan 29 13:40:54 crc kubenswrapper[4787]: I0129 13:40:54.996576 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8qnjj" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:54.998865 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-696b9bdfd-pnnmf"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:54.999091 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-696b9bdfd-pnnmf" podUID="3d1018e7-6cf6-4c3e-b351-6249e795620d" containerName="keystone-api" containerID="cri-o://5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b" gracePeriod=30 Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.008932 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts\") pod \"keystone-dee3-account-create-update-74nmg\" (UID: \"be423fe7-4020-41fe-90fc-bd42f5cec5db\") " pod="openstack/keystone-dee3-account-create-update-74nmg" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.009092 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm7bf\" (UniqueName: \"kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf\") pod \"keystone-dee3-account-create-update-74nmg\" (UID: \"be423fe7-4020-41fe-90fc-bd42f5cec5db\") " pod="openstack/keystone-dee3-account-create-update-74nmg" Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.009625 4787 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.009673 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts podName:be423fe7-4020-41fe-90fc-bd42f5cec5db nodeName:}" failed. No retries permitted until 2026-01-29 13:40:55.509651435 +0000 UTC m=+1494.270911711 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts") pod "keystone-dee3-account-create-update-74nmg" (UID: "be423fe7-4020-41fe-90fc-bd42f5cec5db") : configmap "openstack-scripts" not found Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.018669 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.165:8776/healthcheck\": dial tcp 10.217.0.165:8776: connect: connection refused" Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.020647 4787 projected.go:194] Error preparing data for projected volume kube-api-access-zm7bf for pod openstack/keystone-dee3-account-create-update-74nmg: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.020720 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf podName:be423fe7-4020-41fe-90fc-bd42f5cec5db nodeName:}" failed. No retries permitted until 2026-01-29 13:40:55.520694073 +0000 UTC m=+1494.281954349 (durationBeforeRetry 500ms). 
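The durationBeforeRetry values in this section grow per failed attempt on the same volume: 500ms here for operator-scripts (the Error: line that follows completes this entry), 1s on its next attempt further down, and 4s earlier for the rabbitmq config-data volume. That is the signature of the exponential backoff nestedpendingoperations.go applies to failing mount operations. A sketch of the doubling, assuming an illustrative cap that is not necessarily kubelet's exact constant:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // first retry delay, as in the 500ms entry above
	maxDelay := 2 * time.Minute     // illustrative cap; kubelet's exact constant may differ
	for attempt := 1; attempt <= 6; attempt++ {
		fmt.Printf("attempt %d failed: no retries permitted for %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}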
Error: MountVolume.SetUp failed for volume "kube-api-access-zm7bf" (UniqueName: "kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf") pod "keystone-dee3-account-create-update-74nmg" (UID: "be423fe7-4020-41fe-90fc-bd42f5cec5db") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.076257 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-zlc9h"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.076617 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5687c787c6-cdl5t" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:43536->10.217.0.164:9311: read: connection reset by peer" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.077050 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5687c787c6-cdl5t" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:43524->10.217.0.164:9311: read: connection reset by peer" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.108503 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.111520 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkn9l\" (UniqueName: \"kubernetes.io/projected/6caca38d-9421-4a65-8e5e-ddc0343460c2-kube-api-access-mkn9l\") pod \"6caca38d-9421-4a65-8e5e-ddc0343460c2\" (UID: \"6caca38d-9421-4a65-8e5e-ddc0343460c2\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.111546 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6caca38d-9421-4a65-8e5e-ddc0343460c2-operator-scripts\") pod \"6caca38d-9421-4a65-8e5e-ddc0343460c2\" (UID: \"6caca38d-9421-4a65-8e5e-ddc0343460c2\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.115427 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6caca38d-9421-4a65-8e5e-ddc0343460c2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6caca38d-9421-4a65-8e5e-ddc0343460c2" (UID: "6caca38d-9421-4a65-8e5e-ddc0343460c2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.122124 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-k92qs"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.128276 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-k92qs"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.133723 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6caca38d-9421-4a65-8e5e-ddc0343460c2-kube-api-access-mkn9l" (OuterVolumeSpecName: "kube-api-access-mkn9l") pod "6caca38d-9421-4a65-8e5e-ddc0343460c2" (UID: "6caca38d-9421-4a65-8e5e-ddc0343460c2"). InnerVolumeSpecName "kube-api-access-mkn9l". 
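The token failure at the top of this entry ("failed to fetch token: serviceaccounts \"galera-openstack\" not found") occurs because every kube-api-access-* projected volume is populated through the TokenRequest API, and here the ServiceAccount was deleted before the pod's volumes were set up. A sketch of that call using the real client-go API (kubelet's own token manager adds caching and rotation on top of this):

package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	// Requesting a token for the ServiceAccount named in the log; this returns
	// a NotFound error exactly like the one above once the account is gone.
	tr, err := cs.CoreV1().ServiceAccounts("openstack").CreateToken(
		context.TODO(), "galera-openstack",
		&authv1.TokenRequest{Spec: authv1.TokenRequestSpec{}}, metav1.CreateOptions{})
	if err != nil {
		fmt.Println("failed to fetch token:", err)
		return
	}
	fmt.Println("token expires at:", tr.Status.ExpirationTimestamp)
}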
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.151648 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dee3-account-create-update-74nmg"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.151835 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fedf-account-create-update-5g5mh" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.166824 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.200:3000/\": dial tcp 10.217.0.200:3000: connect: connection refused" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.168069 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.184079 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lsntj"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.186984 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-06dc-account-create-update-xzhzd" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.200350 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-e093-account-create-update-dlpwc"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.205493 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-e093-account-create-update-dlpwc"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.213365 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69319341-ad07-4fcc-a65a-585f389382ab-operator-scripts\") pod \"69319341-ad07-4fcc-a65a-585f389382ab\" (UID: \"69319341-ad07-4fcc-a65a-585f389382ab\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.213473 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4dfv\" (UniqueName: \"kubernetes.io/projected/e6256c29-7af4-4921-b494-ef3a2e5e976f-kube-api-access-w4dfv\") pod \"e6256c29-7af4-4921-b494-ef3a2e5e976f\" (UID: \"e6256c29-7af4-4921-b494-ef3a2e5e976f\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.213504 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfqps\" (UniqueName: \"kubernetes.io/projected/05668608-1ac5-4376-b0ef-0ec5604136a1-kube-api-access-rfqps\") pod \"05668608-1ac5-4376-b0ef-0ec5604136a1\" (UID: \"05668608-1ac5-4376-b0ef-0ec5604136a1\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.213528 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6256c29-7af4-4921-b494-ef3a2e5e976f-operator-scripts\") pod \"e6256c29-7af4-4921-b494-ef3a2e5e976f\" (UID: \"e6256c29-7af4-4921-b494-ef3a2e5e976f\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.213550 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05668608-1ac5-4376-b0ef-0ec5604136a1-operator-scripts\") pod \"05668608-1ac5-4376-b0ef-0ec5604136a1\" (UID: \"05668608-1ac5-4376-b0ef-0ec5604136a1\") " Jan 29 13:40:55 crc 
kubenswrapper[4787]: I0129 13:40:55.213567 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwnbq\" (UniqueName: \"kubernetes.io/projected/69319341-ad07-4fcc-a65a-585f389382ab-kube-api-access-mwnbq\") pod \"69319341-ad07-4fcc-a65a-585f389382ab\" (UID: \"69319341-ad07-4fcc-a65a-585f389382ab\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.213967 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkn9l\" (UniqueName: \"kubernetes.io/projected/6caca38d-9421-4a65-8e5e-ddc0343460c2-kube-api-access-mkn9l\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.213978 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6caca38d-9421-4a65-8e5e-ddc0343460c2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.215991 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6256c29-7af4-4921-b494-ef3a2e5e976f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e6256c29-7af4-4921-b494-ef3a2e5e976f" (UID: "e6256c29-7af4-4921-b494-ef3a2e5e976f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.216024 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.216383 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05668608-1ac5-4376-b0ef-0ec5604136a1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "05668608-1ac5-4376-b0ef-0ec5604136a1" (UID: "05668608-1ac5-4376-b0ef-0ec5604136a1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.216427 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69319341-ad07-4fcc-a65a-585f389382ab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "69319341-ad07-4fcc-a65a-585f389382ab" (UID: "69319341-ad07-4fcc-a65a-585f389382ab"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.222727 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.237303 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.252588 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.263296 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-e613-account-create-update-mrxwq"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.266671 4787 scope.go:117] "RemoveContainer" containerID="a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9" Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.267098 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9\": container with ID starting with a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9 not found: ID does not exist" containerID="a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.267137 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9"} err="failed to get container status \"a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9\": rpc error: code = NotFound desc = could not find container \"a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9\": container with ID starting with a0fa0a96b95105cfd10791febeba441af6bbdc0260a7ec949261e6e0563be2c9 not found: ID does not exist" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.267162 4787 scope.go:117] "RemoveContainer" containerID="0486530403f0ac601abe5de0a5af2b6b1794e8ce8171f91595d95c8ce17b47c4" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.268892 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6256c29-7af4-4921-b494-ef3a2e5e976f-kube-api-access-w4dfv" (OuterVolumeSpecName: "kube-api-access-w4dfv") pod "e6256c29-7af4-4921-b494-ef3a2e5e976f" (UID: "e6256c29-7af4-4921-b494-ef3a2e5e976f"). InnerVolumeSpecName "kube-api-access-w4dfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.269117 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69319341-ad07-4fcc-a65a-585f389382ab-kube-api-access-mwnbq" (OuterVolumeSpecName: "kube-api-access-mwnbq") pod "69319341-ad07-4fcc-a65a-585f389382ab" (UID: "69319341-ad07-4fcc-a65a-585f389382ab"). InnerVolumeSpecName "kube-api-access-mwnbq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.269732 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05668608-1ac5-4376-b0ef-0ec5604136a1-kube-api-access-rfqps" (OuterVolumeSpecName: "kube-api-access-rfqps") pod "05668608-1ac5-4376-b0ef-0ec5604136a1" (UID: "05668608-1ac5-4376-b0ef-0ec5604136a1"). InnerVolumeSpecName "kube-api-access-rfqps". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.272890 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-e613-account-create-update-mrxwq"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.312877 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-70cd-account-create-update-pns45"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.315841 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69319341-ad07-4fcc-a65a-585f389382ab-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.316219 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4dfv\" (UniqueName: \"kubernetes.io/projected/e6256c29-7af4-4921-b494-ef3a2e5e976f-kube-api-access-w4dfv\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.316238 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfqps\" (UniqueName: \"kubernetes.io/projected/05668608-1ac5-4376-b0ef-0ec5604136a1-kube-api-access-rfqps\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.316248 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6256c29-7af4-4921-b494-ef3a2e5e976f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.316257 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05668608-1ac5-4376-b0ef-0ec5604136a1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.316265 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwnbq\" (UniqueName: \"kubernetes.io/projected/69319341-ad07-4fcc-a65a-585f389382ab-kube-api-access-mwnbq\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.323013 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": read tcp 10.217.0.2:49552->10.217.0.202:8775: read: connection reset by peer" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.323919 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": read tcp 10.217.0.2:49566->10.217.0.202:8775: read: connection reset by peer" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.344834 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-70cd-account-create-update-pns45"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.358180 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-77bffb9b6f-5z6t5"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.365903 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-77bffb9b6f-5z6t5"] Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.431077 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" 
containerName="galera" containerID="cri-o://a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72" gracePeriod=30 Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.467556 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518 is running failed: container process not found" containerID="29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.470618 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518 is running failed: container process not found" containerID="29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.471876 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518 is running failed: container process not found" containerID="29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.471913 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="56183615-9f6d-4fc8-8ff9-4856929e5d28" containerName="nova-scheduler-scheduler" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.519502 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts\") pod \"keystone-dee3-account-create-update-74nmg\" (UID: \"be423fe7-4020-41fe-90fc-bd42f5cec5db\") " pod="openstack/keystone-dee3-account-create-update-74nmg" Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.519742 4787 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.519795 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts podName:be423fe7-4020-41fe-90fc-bd42f5cec5db nodeName:}" failed. No retries permitted until 2026-01-29 13:40:56.519781103 +0000 UTC m=+1495.281041379 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts") pod "keystone-dee3-account-create-update-74nmg" (UID: "be423fe7-4020-41fe-90fc-bd42f5cec5db") : configmap "openstack-scripts" not found Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.571635 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-zm7bf operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-dee3-account-create-update-74nmg" podUID="be423fe7-4020-41fe-90fc-bd42f5cec5db" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.577401 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-hz6gf_383ed8f7-22dd-49b6-a932-6425cc62a6d1/ovn-controller/0.log" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.577471 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.578992 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-69d8bc6c98-vmd8w" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.584595 4787 scope.go:117] "RemoveContainer" containerID="590a7dfe28927af5962f958eb03c0de73f3909c4b35a1e96d83e4eb1b3065948" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620476 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-scripts\") pod \"87eff82d-823f-44a9-b96b-fed35701c54b\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620539 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6554\" (UniqueName: \"kubernetes.io/projected/87eff82d-823f-44a9-b96b-fed35701c54b-kube-api-access-k6554\") pod \"87eff82d-823f-44a9-b96b-fed35701c54b\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620589 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/383ed8f7-22dd-49b6-a932-6425cc62a6d1-scripts\") pod \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620608 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5gnz\" (UniqueName: \"kubernetes.io/projected/383ed8f7-22dd-49b6-a932-6425cc62a6d1-kube-api-access-h5gnz\") pod \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620654 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-log-ovn\") pod \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620679 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eff82d-823f-44a9-b96b-fed35701c54b-logs\") pod \"87eff82d-823f-44a9-b96b-fed35701c54b\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " Jan 29 13:40:55 crc kubenswrapper[4787]: 
I0129 13:40:55.620738 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-public-tls-certs\") pod \"87eff82d-823f-44a9-b96b-fed35701c54b\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620829 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run-ovn\") pod \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620874 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-ovn-controller-tls-certs\") pod \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620911 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run\") pod \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620967 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-config-data\") pod \"87eff82d-823f-44a9-b96b-fed35701c54b\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.620991 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-combined-ca-bundle\") pod \"87eff82d-823f-44a9-b96b-fed35701c54b\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.621022 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-combined-ca-bundle\") pod \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\" (UID: \"383ed8f7-22dd-49b6-a932-6425cc62a6d1\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.621047 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-internal-tls-certs\") pod \"87eff82d-823f-44a9-b96b-fed35701c54b\" (UID: \"87eff82d-823f-44a9-b96b-fed35701c54b\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.621542 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm7bf\" (UniqueName: \"kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf\") pod \"keystone-dee3-account-create-update-74nmg\" (UID: \"be423fe7-4020-41fe-90fc-bd42f5cec5db\") " pod="openstack/keystone-dee3-account-create-update-74nmg" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.623764 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "383ed8f7-22dd-49b6-a932-6425cc62a6d1" (UID: 
"383ed8f7-22dd-49b6-a932-6425cc62a6d1"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.626599 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-scripts" (OuterVolumeSpecName: "scripts") pod "87eff82d-823f-44a9-b96b-fed35701c54b" (UID: "87eff82d-823f-44a9-b96b-fed35701c54b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.626876 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87eff82d-823f-44a9-b96b-fed35701c54b-kube-api-access-k6554" (OuterVolumeSpecName: "kube-api-access-k6554") pod "87eff82d-823f-44a9-b96b-fed35701c54b" (UID: "87eff82d-823f-44a9-b96b-fed35701c54b"). InnerVolumeSpecName "kube-api-access-k6554". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.627872 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/383ed8f7-22dd-49b6-a932-6425cc62a6d1-scripts" (OuterVolumeSpecName: "scripts") pod "383ed8f7-22dd-49b6-a932-6425cc62a6d1" (UID: "383ed8f7-22dd-49b6-a932-6425cc62a6d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.628252 4787 projected.go:194] Error preparing data for projected volume kube-api-access-zm7bf for pod openstack/keystone-dee3-account-create-update-74nmg: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.628322 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf podName:be423fe7-4020-41fe-90fc-bd42f5cec5db nodeName:}" failed. No retries permitted until 2026-01-29 13:40:56.628303224 +0000 UTC m=+1495.389563500 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-zm7bf" (UniqueName: "kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf") pod "keystone-dee3-account-create-update-74nmg" (UID: "be423fe7-4020-41fe-90fc-bd42f5cec5db") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.631628 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87eff82d-823f-44a9-b96b-fed35701c54b-logs" (OuterVolumeSpecName: "logs") pod "87eff82d-823f-44a9-b96b-fed35701c54b" (UID: "87eff82d-823f-44a9-b96b-fed35701c54b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.631667 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "383ed8f7-22dd-49b6-a932-6425cc62a6d1" (UID: "383ed8f7-22dd-49b6-a932-6425cc62a6d1"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.632881 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run" (OuterVolumeSpecName: "var-run") pod "383ed8f7-22dd-49b6-a932-6425cc62a6d1" (UID: "383ed8f7-22dd-49b6-a932-6425cc62a6d1"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.635879 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/383ed8f7-22dd-49b6-a932-6425cc62a6d1-kube-api-access-h5gnz" (OuterVolumeSpecName: "kube-api-access-h5gnz") pod "383ed8f7-22dd-49b6-a932-6425cc62a6d1" (UID: "383ed8f7-22dd-49b6-a932-6425cc62a6d1"). InnerVolumeSpecName "kube-api-access-h5gnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.671356 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "383ed8f7-22dd-49b6-a932-6425cc62a6d1" (UID: "383ed8f7-22dd-49b6-a932-6425cc62a6d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.683846 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "87eff82d-823f-44a9-b96b-fed35701c54b" (UID: "87eff82d-823f-44a9-b96b-fed35701c54b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.706175 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-config-data" (OuterVolumeSpecName: "config-data") pod "87eff82d-823f-44a9-b96b-fed35701c54b" (UID: "87eff82d-823f-44a9-b96b-fed35701c54b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724383 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724422 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6554\" (UniqueName: \"kubernetes.io/projected/87eff82d-823f-44a9-b96b-fed35701c54b-kube-api-access-k6554\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.724428 4787 secret.go:188] Couldn't get secret openstack/nova-cell1-conductor-config-data: secret "nova-cell1-conductor-config-data" not found Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.724552 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data podName:f73803d0-ec9b-4483-a509-7bff9afb1d85 nodeName:}" failed. No retries permitted until 2026-01-29 13:40:59.724511781 +0000 UTC m=+1498.485772097 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data") pod "nova-cell1-conductor-0" (UID: "f73803d0-ec9b-4483-a509-7bff9afb1d85") : secret "nova-cell1-conductor-config-data" not found Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724434 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5gnz\" (UniqueName: \"kubernetes.io/projected/383ed8f7-22dd-49b6-a932-6425cc62a6d1-kube-api-access-h5gnz\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724588 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/383ed8f7-22dd-49b6-a932-6425cc62a6d1-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724599 4787 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724609 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87eff82d-823f-44a9-b96b-fed35701c54b-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724617 4787 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724626 4787 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/383ed8f7-22dd-49b6-a932-6425cc62a6d1-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724638 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724646 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.724658 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.741793 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.741928 4787 scope.go:117] "RemoveContainer" containerID="2b304ab8c2c786d238ef41d0439f6a9dcc42e20c02b9de41cde375bfb43bf8a1" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.782200 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.804134 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.804562 4787 scope.go:117] "RemoveContainer" containerID="b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.823104 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "87eff82d-823f-44a9-b96b-fed35701c54b" (UID: "87eff82d-823f-44a9-b96b-fed35701c54b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826309 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7chxw\" (UniqueName: \"kubernetes.io/projected/1287d5ec-d072-43ba-b553-6d2d229b7c6c-kube-api-access-7chxw\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826370 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-certs\") pod \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826416 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-config-data\") pod \"56183615-9f6d-4fc8-8ff9-4856929e5d28\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826447 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826489 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-combined-ca-bundle\") pod \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826605 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-config\") pod \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826629 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-internal-tls-certs\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826783 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzq7d\" (UniqueName: \"kubernetes.io/projected/56183615-9f6d-4fc8-8ff9-4856929e5d28-kube-api-access-nzq7d\") pod \"56183615-9f6d-4fc8-8ff9-4856929e5d28\" (UID: 
\"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826812 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data-custom\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826844 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-scripts\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826888 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-combined-ca-bundle\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826910 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1287d5ec-d072-43ba-b553-6d2d229b7c6c-logs\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826951 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz5nf\" (UniqueName: \"kubernetes.io/projected/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-api-access-hz5nf\") pod \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\" (UID: \"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826979 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-public-tls-certs\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.826999 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1287d5ec-d072-43ba-b553-6d2d229b7c6c-etc-machine-id\") pod \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\" (UID: \"1287d5ec-d072-43ba-b553-6d2d229b7c6c\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.827029 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-combined-ca-bundle\") pod \"56183615-9f6d-4fc8-8ff9-4856929e5d28\" (UID: \"56183615-9f6d-4fc8-8ff9-4856929e5d28\") " Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.827821 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.828195 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "383ed8f7-22dd-49b6-a932-6425cc62a6d1" (UID: "383ed8f7-22dd-49b6-a932-6425cc62a6d1"). InnerVolumeSpecName "ovn-controller-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.832435 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-api-access-hz5nf" (OuterVolumeSpecName: "kube-api-access-hz5nf") pod "d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" (UID: "d21a5fb3-2d4b-4b53-8fe6-45fe636362b4"). InnerVolumeSpecName "kube-api-access-hz5nf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.832882 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1287d5ec-d072-43ba-b553-6d2d229b7c6c-logs" (OuterVolumeSpecName: "logs") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.839774 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1287d5ec-d072-43ba-b553-6d2d229b7c6c-kube-api-access-7chxw" (OuterVolumeSpecName: "kube-api-access-7chxw") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "kube-api-access-7chxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.839873 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-scripts" (OuterVolumeSpecName: "scripts") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.840502 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.840584 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1287d5ec-d072-43ba-b553-6d2d229b7c6c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.843640 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56183615-9f6d-4fc8-8ff9-4856929e5d28-kube-api-access-nzq7d" (OuterVolumeSpecName: "kube-api-access-nzq7d") pod "56183615-9f6d-4fc8-8ff9-4856929e5d28" (UID: "56183615-9f6d-4fc8-8ff9-4856929e5d28"). InnerVolumeSpecName "kube-api-access-nzq7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.844692 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "87eff82d-823f-44a9-b96b-fed35701c54b" (UID: "87eff82d-823f-44a9-b96b-fed35701c54b"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.869047 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.869210 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-config-data" (OuterVolumeSpecName: "config-data") pod "56183615-9f6d-4fc8-8ff9-4856929e5d28" (UID: "56183615-9f6d-4fc8-8ff9-4856929e5d28"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.870719 4787 scope.go:117] "RemoveContainer" containerID="31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.878167 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" (UID: "d21a5fb3-2d4b-4b53-8fe6-45fe636362b4"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.913023 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" (UID: "d21a5fb3-2d4b-4b53-8fe6-45fe636362b4"). InnerVolumeSpecName "kube-state-metrics-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.918913 4787 scope.go:117] "RemoveContainer" containerID="b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8" Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.921657 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8\": container with ID starting with b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8 not found: ID does not exist" containerID="b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.921696 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8"} err="failed to get container status \"b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8\": rpc error: code = NotFound desc = could not find container \"b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8\": container with ID starting with b9c7e807965f757afeffca74dda30b24beb51f70c99f1a4ba140bc4aec4446c8 not found: ID does not exist" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.921722 4787 scope.go:117] "RemoveContainer" containerID="31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1" Jan 29 13:40:55 crc kubenswrapper[4787]: E0129 13:40:55.922134 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1\": container with ID starting with 31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1 not found: ID does not exist" containerID="31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.922296 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1"} err="failed to get container status \"31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1\": rpc error: code = NotFound desc = could not find container \"31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1\": container with ID starting with 31a50f45d91bd2605c94cfa789417a19f601367d4dc67d42c304f888b5b133c1 not found: ID does not exist" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.923304 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" (UID: "d21a5fb3-2d4b-4b53-8fe6-45fe636362b4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930234 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930277 4787 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930321 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87eff82d-823f-44a9-b96b-fed35701c54b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930335 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzq7d\" (UniqueName: \"kubernetes.io/projected/56183615-9f6d-4fc8-8ff9-4856929e5d28-kube-api-access-nzq7d\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930350 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930361 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930404 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930416 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1287d5ec-d072-43ba-b553-6d2d229b7c6c-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930427 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz5nf\" (UniqueName: \"kubernetes.io/projected/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-api-access-hz5nf\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930438 4787 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1287d5ec-d072-43ba-b553-6d2d229b7c6c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930561 4787 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/383ed8f7-22dd-49b6-a932-6425cc62a6d1-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.930591 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7chxw\" (UniqueName: \"kubernetes.io/projected/1287d5ec-d072-43ba-b553-6d2d229b7c6c-kube-api-access-7chxw\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.931726 4787 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.931745 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.932655 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.932821 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data" (OuterVolumeSpecName: "config-data") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.936478 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56183615-9f6d-4fc8-8ff9-4856929e5d28" (UID: "56183615-9f6d-4fc8-8ff9-4856929e5d28"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.964144 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1287d5ec-d072-43ba-b553-6d2d229b7c6c" (UID: "1287d5ec-d072-43ba-b553-6d2d229b7c6c"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.971857 4787 generic.go:334] "Generic (PLEG): container finished" podID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerID="e1a61b54bf10478ca80351b015706e154501c6dbfad962662f03a6e51dfe02bb" exitCode=0 Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.971910 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67675e2f-3a2b-4552-bbd5-c12b3ba3a505","Type":"ContainerDied","Data":"e1a61b54bf10478ca80351b015706e154501c6dbfad962662f03a6e51dfe02bb"} Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.974859 4787 generic.go:334] "Generic (PLEG): container finished" podID="f73803d0-ec9b-4483-a509-7bff9afb1d85" containerID="8176461a98299b76193219bfaaced8b08f48e5e770c550736d32af3741ce5884" exitCode=0 Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.974904 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f73803d0-ec9b-4483-a509-7bff9afb1d85","Type":"ContainerDied","Data":"8176461a98299b76193219bfaaced8b08f48e5e770c550736d32af3741ce5884"} Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.984210 4787 generic.go:334] "Generic (PLEG): container finished" podID="d95df36d-a737-4136-8921-01fe4e028add" containerID="2e159ae76f0bb63f2124cb8a5615db9a9eac4b38c6806f2dcf9a137b01700373" exitCode=0 Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.984263 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d95df36d-a737-4136-8921-01fe4e028add","Type":"ContainerDied","Data":"2e159ae76f0bb63f2124cb8a5615db9a9eac4b38c6806f2dcf9a137b01700373"} Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.990928 4787 generic.go:334] "Generic (PLEG): container finished" podID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerID="ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f" exitCode=0 Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.991013 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.997660 4787 generic.go:334] "Generic (PLEG): container finished" podID="56183615-9f6d-4fc8-8ff9-4856929e5d28" containerID="29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518" exitCode=0 Jan 29 13:40:55 crc kubenswrapper[4787]: I0129 13:40:55.997735 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.000757 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.001677 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05e445c0-f43c-4cc7-854d-fa9bfe184d2e" path="/var/lib/kubelet/pods/05e445c0-f43c-4cc7-854d-fa9bfe184d2e/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.002418 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="200c9d48-bad8-492c-942c-054c187241eb" path="/var/lib/kubelet/pods/200c9d48-bad8-492c-942c-054c187241eb/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.003178 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="391a21f6-7c95-41ff-9197-9ed01d35e73b" path="/var/lib/kubelet/pods/391a21f6-7c95-41ff-9197-9ed01d35e73b/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.005041 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="499a4ece-1afc-472d-9f39-76f56d1c8681" path="/var/lib/kubelet/pods/499a4ece-1afc-472d-9f39-76f56d1c8681/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.006944 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52053f33-608f-4f1e-9432-baece90d08fb" path="/var/lib/kubelet/pods/52053f33-608f-4f1e-9432-baece90d08fb/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.007643 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cba6e64-0710-4f92-aca6-f141b4ebcaea" path="/var/lib/kubelet/pods/6cba6e64-0710-4f92-aca6-f141b4ebcaea/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.008089 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac650857-a714-4612-90b0-a8dada6949bb" path="/var/lib/kubelet/pods/ac650857-a714-4612-90b0-a8dada6949bb/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.008686 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf2e6240-bb2e-45fc-b33f-3b54a718f136" path="/var/lib/kubelet/pods/bf2e6240-bb2e-45fc-b33f-3b54a718f136/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.013636 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea682f38-6eee-4ea1-beea-bcd14edc880e" path="/var/lib/kubelet/pods/ea682f38-6eee-4ea1-beea-bcd14edc880e/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.016225 4787 generic.go:334] "Generic (PLEG): container finished" podID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerID="083f46373caf16e71650bd1e4ebee2fe1d02f7cb3f599bbb6f51f4683a6a4fa6" exitCode=0 Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.018761 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" path="/var/lib/kubelet/pods/ec204729-6346-4c3a-a479-2a2aa58eb3bc/volumes" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.037387 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1287d5ec-d072-43ba-b553-6d2d229b7c6c","Type":"ContainerDied","Data":"ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f"} Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.037441 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1287d5ec-d072-43ba-b553-6d2d229b7c6c","Type":"ContainerDied","Data":"258cf7aae6f3788f6fbd3eb95fd920528e7b1817b0a99e287db2d7d91d98785c"} Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.037476 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-scheduler-0" event={"ID":"56183615-9f6d-4fc8-8ff9-4856929e5d28","Type":"ContainerDied","Data":"29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518"} Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.037494 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"56183615-9f6d-4fc8-8ff9-4856929e5d28","Type":"ContainerDied","Data":"cbd8bc309ad18fff4e2798563b3d55fb3581b8ee8057d7c0c99c30f7659d1e3d"} Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.037506 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b9df2172-145d-4edd-8d1c-7cc6768840bb","Type":"ContainerDied","Data":"083f46373caf16e71650bd1e4ebee2fe1d02f7cb3f599bbb6f51f4683a6a4fa6"} Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.037531 4787 scope.go:117] "RemoveContainer" containerID="ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.040748 4787 generic.go:334] "Generic (PLEG): container finished" podID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerID="483fbd8d88259f502b0982cdbf412937c30e55091fb48778417b91b9a155bfca" exitCode=0 Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.040823 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5687c787c6-cdl5t" event={"ID":"3f204ba0-4972-4e50-9c21-e9639ef73ff3","Type":"ContainerDied","Data":"483fbd8d88259f502b0982cdbf412937c30e55091fb48778417b91b9a155bfca"} Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.045759 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-config-data\") pod \"93f58b7a-13c3-49ef-8c78-a5931438cba6\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.045841 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-nova-metadata-tls-certs\") pod \"93f58b7a-13c3-49ef-8c78-a5931438cba6\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.045888 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-combined-ca-bundle\") pod \"93f58b7a-13c3-49ef-8c78-a5931438cba6\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.045926 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f58b7a-13c3-49ef-8c78-a5931438cba6-logs\") pod \"93f58b7a-13c3-49ef-8c78-a5931438cba6\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.046063 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkmr2\" (UniqueName: \"kubernetes.io/projected/93f58b7a-13c3-49ef-8c78-a5931438cba6-kube-api-access-mkmr2\") pod \"93f58b7a-13c3-49ef-8c78-a5931438cba6\" (UID: \"93f58b7a-13c3-49ef-8c78-a5931438cba6\") " Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.046652 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-public-tls-certs\") on node \"crc\" 
DevicePath \"\"" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.046671 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56183615-9f6d-4fc8-8ff9-4856929e5d28-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.046680 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.046689 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1287d5ec-d072-43ba-b553-6d2d229b7c6c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.049327 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93f58b7a-13c3-49ef-8c78-a5931438cba6-logs" (OuterVolumeSpecName: "logs") pod "93f58b7a-13c3-49ef-8c78-a5931438cba6" (UID: "93f58b7a-13c3-49ef-8c78-a5931438cba6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.049623 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93f58b7a-13c3-49ef-8c78-a5931438cba6-kube-api-access-mkmr2" (OuterVolumeSpecName: "kube-api-access-mkmr2") pod "93f58b7a-13c3-49ef-8c78-a5931438cba6" (UID: "93f58b7a-13c3-49ef-8c78-a5931438cba6"). InnerVolumeSpecName "kube-api-access-mkmr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.063164 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-hz6gf_383ed8f7-22dd-49b6-a932-6425cc62a6d1/ovn-controller/0.log" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.063262 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hz6gf" event={"ID":"383ed8f7-22dd-49b6-a932-6425cc62a6d1","Type":"ContainerDied","Data":"e1432302026dec057fd0a96fc8b4819324643fd29b0afe8cf0a9bdb83ca793c2"} Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.063383 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hz6gf" Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.073961 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.076145 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.084945 4787 generic.go:334] "Generic (PLEG): container finished" podID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerID="a8692568c18184c00d2da8446d64f8b935631ac2dbe63cd7cc1211e1a04eae5d" exitCode=0
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.084981 4787 generic.go:334] "Generic (PLEG): container finished" podID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerID="e36b1605a3b04ed9fbf874c7a5383659083ef860921f7d81eb3be81f257cfe63" exitCode=2
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.084990 4787 generic.go:334] "Generic (PLEG): container finished" podID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerID="ba639ee3ff7377d69eaf774b671ae0c100eaa7448c3c876737ed5dc86aecd94f" exitCode=0
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.084998 4787 generic.go:334] "Generic (PLEG): container finished" podID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerID="2ed40eb4eb2d3cdf467d437f74046a37ba0764a96809f702ca6b5a682ad85043" exitCode=0
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.085050 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerDied","Data":"a8692568c18184c00d2da8446d64f8b935631ac2dbe63cd7cc1211e1a04eae5d"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.085078 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerDied","Data":"e36b1605a3b04ed9fbf874c7a5383659083ef860921f7d81eb3be81f257cfe63"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.085099 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerDied","Data":"ba639ee3ff7377d69eaf774b671ae0c100eaa7448c3c876737ed5dc86aecd94f"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.085110 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerDied","Data":"2ed40eb4eb2d3cdf467d437f74046a37ba0764a96809f702ca6b5a682ad85043"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.087788 4787 generic.go:334] "Generic (PLEG): container finished" podID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerID="25425c352e980ca78c9d13c8057eecdcf5a099ad60b14349a5a4e3ffcfaaeba4" exitCode=0
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.087863 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93f58b7a-13c3-49ef-8c78-a5931438cba6","Type":"ContainerDied","Data":"25425c352e980ca78c9d13c8057eecdcf5a099ad60b14349a5a4e3ffcfaaeba4"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.087972 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.093982 4787 scope.go:117] "RemoveContainer" containerID="97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.099778 4787 generic.go:334] "Generic (PLEG): container finished" podID="d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" containerID="13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85" exitCode=2
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.099854 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4","Type":"ContainerDied","Data":"13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.099882 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d21a5fb3-2d4b-4b53-8fe6-45fe636362b4","Type":"ContainerDied","Data":"086323df70f3f8b9810725ffa69a96b1490cf46097669d3fecec1553fbcd66aa"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.099965 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.102285 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-69d8bc6c98-vmd8w" event={"ID":"87eff82d-823f-44a9-b96b-fed35701c54b","Type":"ContainerDied","Data":"d1e6cbe8ec4f7d4811522738b72c5fbbc76a769496c56c7335ad72ba279a33ad"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.102404 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-69d8bc6c98-vmd8w"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.111270 4787 generic.go:334] "Generic (PLEG): container finished" podID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerID="a8b8d261a49e47ed22a0cd5c563cf4143e2a75230a47767988c41d719d54d742" exitCode=0
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.111567 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dee3-account-create-update-74nmg"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.116160 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a","Type":"ContainerDied","Data":"a8b8d261a49e47ed22a0cd5c563cf4143e2a75230a47767988c41d719d54d742"}
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.116193 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8qnjj"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.116242 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-fedf-account-create-update-5g5mh"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.116274 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a3c3-account-create-update-h9g5j"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.116248 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.116648 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-06dc-account-create-update-xzhzd"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.116950 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-config-data" (OuterVolumeSpecName: "config-data") pod "93f58b7a-13c3-49ef-8c78-a5931438cba6" (UID: "93f58b7a-13c3-49ef-8c78-a5931438cba6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.124892 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93f58b7a-13c3-49ef-8c78-a5931438cba6" (UID: "93f58b7a-13c3-49ef-8c78-a5931438cba6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.134651 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.144245 4787 scope.go:117] "RemoveContainer" containerID="ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.144424 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 13:40:56 crc kubenswrapper[4787]: E0129 13:40:56.145419 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f\": container with ID starting with ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f not found: ID does not exist" containerID="ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.145495 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f"} err="failed to get container status \"ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f\": rpc error: code = NotFound desc = could not find container \"ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f\": container with ID starting with ddbea1a45470ae4696f6d995f467267d1d7f9f5eb210a9bfe55f4f6d4ddccb5f not found: ID does not exist"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.145532 4787 scope.go:117] "RemoveContainer" containerID="97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a"
Jan 29 13:40:56 crc kubenswrapper[4787]: E0129 13:40:56.146860 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a\": container with ID starting with 97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a not found: ID does not exist" containerID="97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.146889 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a"} err="failed to get container status \"97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a\": rpc error: code = NotFound desc = could not find container \"97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a\": container with ID starting with 97367004be62912ce7528007c71f03670e3cc96c0c0f04f2eb9cea07eb8f5f6a not found: ID does not exist"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.146907 4787 scope.go:117] "RemoveContainer" containerID="29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.148208 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-httpd-run\") pod \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.148294 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-internal-tls-certs\") pod \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.148326 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-logs\") pod \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.148432 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2ffs\" (UniqueName: \"kubernetes.io/projected/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-kube-api-access-z2ffs\") pod \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.148500 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.148560 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-config-data\") pod \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.148603 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-combined-ca-bundle\") pod \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.148640 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-scripts\") pod \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\" (UID: \"4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.149093 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" (UID: "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.149177 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-logs" (OuterVolumeSpecName: "logs") pod "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" (UID: "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.150229 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkmr2\" (UniqueName: \"kubernetes.io/projected/93f58b7a-13c3-49ef-8c78-a5931438cba6-kube-api-access-mkmr2\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.150251 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.150261 4787 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.150270 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.150277 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-logs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.150288 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f58b7a-13c3-49ef-8c78-a5931438cba6-logs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.159168 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-kube-api-access-z2ffs" (OuterVolumeSpecName: "kube-api-access-z2ffs") pod "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" (UID: "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a"). InnerVolumeSpecName "kube-api-access-z2ffs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.164346 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-scripts" (OuterVolumeSpecName: "scripts") pod "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" (UID: "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.168242 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.170671 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "93f58b7a-13c3-49ef-8c78-a5931438cba6" (UID: "93f58b7a-13c3-49ef-8c78-a5931438cba6"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.177779 4787 scope.go:117] "RemoveContainer" containerID="29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518"
Jan 29 13:40:56 crc kubenswrapper[4787]: E0129 13:40:56.178242 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518\": container with ID starting with 29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518 not found: ID does not exist" containerID="29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.178286 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518"} err="failed to get container status \"29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518\": rpc error: code = NotFound desc = could not find container \"29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518\": container with ID starting with 29445c130219d6967e94c6ed51c6d6ea86464df69e2e576c0b3cb4b57d80c518 not found: ID does not exist"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.178315 4787 scope.go:117] "RemoveContainer" containerID="017e22c111bbd8760ee2a71366611b3b0a47678a98b47665937435aba13da530"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.187090 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" (UID: "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.201498 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-config-data" (OuterVolumeSpecName: "config-data") pod "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" (UID: "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.201539 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" (UID: "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.207918 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" (UID: "4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.212627 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dee3-account-create-update-74nmg"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.223704 4787 scope.go:117] "RemoveContainer" containerID="25425c352e980ca78c9d13c8057eecdcf5a099ad60b14349a5a4e3ffcfaaeba4"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.251791 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2ffs\" (UniqueName: \"kubernetes.io/projected/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-kube-api-access-z2ffs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.251824 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.251857 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.251869 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.251881 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-scripts\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.251892 4787 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93f58b7a-13c3-49ef-8c78-a5931438cba6-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.251903 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.283500 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.353759 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.537174 4787 scope.go:117] "RemoveContainer" containerID="221715efc1d70075a201ff1b336d7ca967d74dcf9c3ba5c93e0689478777ecd0"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.557475 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts\") pod \"keystone-dee3-account-create-update-74nmg\" (UID: \"be423fe7-4020-41fe-90fc-bd42f5cec5db\") " pod="openstack/keystone-dee3-account-create-update-74nmg"
Jan 29 13:40:56 crc kubenswrapper[4787]: E0129 13:40:56.557617 4787 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Jan 29 13:40:56 crc kubenswrapper[4787]: E0129 13:40:56.557679 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts podName:be423fe7-4020-41fe-90fc-bd42f5cec5db nodeName:}" failed. No retries permitted until 2026-01-29 13:40:58.557660842 +0000 UTC m=+1497.318921108 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts") pod "keystone-dee3-account-create-update-74nmg" (UID: "be423fe7-4020-41fe-90fc-bd42f5cec5db") : configmap "openstack-scripts" not found
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.636017 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.636493 4787 scope.go:117] "RemoveContainer" containerID="13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.658388 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-kolla-config\") pod \"d95df36d-a737-4136-8921-01fe4e028add\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.658430 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-memcached-tls-certs\") pod \"d95df36d-a737-4136-8921-01fe4e028add\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.658511 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qjnt\" (UniqueName: \"kubernetes.io/projected/d95df36d-a737-4136-8921-01fe4e028add-kube-api-access-2qjnt\") pod \"d95df36d-a737-4136-8921-01fe4e028add\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.658566 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-config-data\") pod \"d95df36d-a737-4136-8921-01fe4e028add\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.658612 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-combined-ca-bundle\") pod \"d95df36d-a737-4136-8921-01fe4e028add\" (UID: \"d95df36d-a737-4136-8921-01fe4e028add\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.658925 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm7bf\" (UniqueName: \"kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf\") pod \"keystone-dee3-account-create-update-74nmg\" (UID: \"be423fe7-4020-41fe-90fc-bd42f5cec5db\") " pod="openstack/keystone-dee3-account-create-update-74nmg"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.659913 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "d95df36d-a737-4136-8921-01fe4e028add" (UID: "d95df36d-a737-4136-8921-01fe4e028add"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.665289 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-config-data" (OuterVolumeSpecName: "config-data") pod "d95df36d-a737-4136-8921-01fe4e028add" (UID: "d95df36d-a737-4136-8921-01fe4e028add"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.665608 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 29 13:40:56 crc kubenswrapper[4787]: E0129 13:40:56.665771 4787 projected.go:194] Error preparing data for projected volume kube-api-access-zm7bf for pod openstack/keystone-dee3-account-create-update-74nmg: failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 29 13:40:56 crc kubenswrapper[4787]: E0129 13:40:56.665844 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf podName:be423fe7-4020-41fe-90fc-bd42f5cec5db nodeName:}" failed. No retries permitted until 2026-01-29 13:40:58.665823233 +0000 UTC m=+1497.427083619 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-zm7bf" (UniqueName: "kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf") pod "keystone-dee3-account-create-update-74nmg" (UID: "be423fe7-4020-41fe-90fc-bd42f5cec5db") : failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.686199 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d95df36d-a737-4136-8921-01fe4e028add-kube-api-access-2qjnt" (OuterVolumeSpecName: "kube-api-access-2qjnt") pod "d95df36d-a737-4136-8921-01fe4e028add" (UID: "d95df36d-a737-4136-8921-01fe4e028add"). InnerVolumeSpecName "kube-api-access-2qjnt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.698394 4787 scope.go:117] "RemoveContainer" containerID="13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.702746 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-69d8bc6c98-vmd8w"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.707737 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d95df36d-a737-4136-8921-01fe4e028add" (UID: "d95df36d-a737-4136-8921-01fe4e028add"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: E0129 13:40:56.714628 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85\": container with ID starting with 13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85 not found: ID does not exist" containerID="13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.714671 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85"} err="failed to get container status \"13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85\": rpc error: code = NotFound desc = could not find container \"13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85\": container with ID starting with 13fe6f522e00258a81331b3f60f30fe2124a815231e6c4a66ab4ed7505f37e85 not found: ID does not exist"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.714694 4787 scope.go:117] "RemoveContainer" containerID="1f0d6877829ebcf7be918239787102e0a2f16c103fadf03c565be16af5f1f03a"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.715510 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-69d8bc6c98-vmd8w"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.743511 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-hz6gf"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.749077 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-hz6gf"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.760361 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data\") pod \"f73803d0-ec9b-4483-a509-7bff9afb1d85\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.760413 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zd9q\" (UniqueName: \"kubernetes.io/projected/f73803d0-ec9b-4483-a509-7bff9afb1d85-kube-api-access-9zd9q\") pod \"f73803d0-ec9b-4483-a509-7bff9afb1d85\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.760517 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-combined-ca-bundle\") pod \"f73803d0-ec9b-4483-a509-7bff9afb1d85\" (UID: \"f73803d0-ec9b-4483-a509-7bff9afb1d85\") "
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.761037 4787 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-kolla-config\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.761061 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qjnt\" (UniqueName: \"kubernetes.io/projected/d95df36d-a737-4136-8921-01fe4e028add-kube-api-access-2qjnt\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.761073 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d95df36d-a737-4136-8921-01fe4e028add-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.761084 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.780681 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f73803d0-ec9b-4483-a509-7bff9afb1d85-kube-api-access-9zd9q" (OuterVolumeSpecName: "kube-api-access-9zd9q") pod "f73803d0-ec9b-4483-a509-7bff9afb1d85" (UID: "f73803d0-ec9b-4483-a509-7bff9afb1d85"). InnerVolumeSpecName "kube-api-access-9zd9q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.781751 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "d95df36d-a737-4136-8921-01fe4e028add" (UID: "d95df36d-a737-4136-8921-01fe4e028add"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.796471 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-8qnjj"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.809897 4787 scope.go:117] "RemoveContainer" containerID="7fa1b78bdd06010ec0e648c7dc942c45ba46fb8d183d57540ee5aece3a17a14d"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.810627 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-8qnjj"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.855963 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-xzhzd"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.865575 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-06dc-account-create-update-xzhzd"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.868643 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data" (OuterVolumeSpecName: "config-data") pod "f73803d0-ec9b-4483-a509-7bff9afb1d85" (UID: "f73803d0-ec9b-4483-a509-7bff9afb1d85"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.879302 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zd9q\" (UniqueName: \"kubernetes.io/projected/f73803d0-ec9b-4483-a509-7bff9afb1d85-kube-api-access-9zd9q\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.879582 4787 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d95df36d-a737-4136-8921-01fe4e028add-memcached-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.890006 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5687c787c6-cdl5t"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.896518 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.908639 4787 scope.go:117] "RemoveContainer" containerID="a8b8d261a49e47ed22a0cd5c563cf4143e2a75230a47767988c41d719d54d742"
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.930926 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f73803d0-ec9b-4483-a509-7bff9afb1d85" (UID: "f73803d0-ec9b-4483-a509-7bff9afb1d85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.933110 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 29 13:40:56 crc kubenswrapper[4787]: I0129 13:40:56.992610 4787 scope.go:117] "RemoveContainer" containerID="431d19c70bb7fc09c0cea13cec37421e6b58778d78ec5b6bf958f731b6ca0476"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:56.997528 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-h9g5j"]
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008052 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-public-tls-certs\") pod \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008103 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkz9l\" (UniqueName: \"kubernetes.io/projected/3f204ba0-4972-4e50-9c21-e9639ef73ff3-kube-api-access-vkz9l\") pod \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008126 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data-custom\") pod \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008191 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data\") pod \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008239 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-internal-tls-certs\") pod \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008290 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f204ba0-4972-4e50-9c21-e9639ef73ff3-logs\") pod \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008337 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-combined-ca-bundle\") pod \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\" (UID: \"3f204ba0-4972-4e50-9c21-e9639ef73ff3\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008664 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.008676 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f73803d0-ec9b-4483-a509-7bff9afb1d85-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.019580 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-a3c3-account-create-update-h9g5j"]
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.023907 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f204ba0-4972-4e50-9c21-e9639ef73ff3-logs" (OuterVolumeSpecName: "logs") pod "3f204ba0-4972-4e50-9c21-e9639ef73ff3" (UID: "3f204ba0-4972-4e50-9c21-e9639ef73ff3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.025759 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.026606 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f204ba0-4972-4e50-9c21-e9639ef73ff3-kube-api-access-vkz9l" (OuterVolumeSpecName: "kube-api-access-vkz9l") pod "3f204ba0-4972-4e50-9c21-e9639ef73ff3" (UID: "3f204ba0-4972-4e50-9c21-e9639ef73ff3"). InnerVolumeSpecName "kube-api-access-vkz9l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.030639 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3f204ba0-4972-4e50-9c21-e9639ef73ff3" (UID: "3f204ba0-4972-4e50-9c21-e9639ef73ff3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.051149 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.058040 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.069804 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-fedf-account-create-update-5g5mh"]
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.074674 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-fedf-account-create-update-5g5mh"]
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.077624 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f204ba0-4972-4e50-9c21-e9639ef73ff3" (UID: "3f204ba0-4972-4e50-9c21-e9639ef73ff3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.078506 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3f204ba0-4972-4e50-9c21-e9639ef73ff3" (UID: "3f204ba0-4972-4e50-9c21-e9639ef73ff3"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.081885 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data" (OuterVolumeSpecName: "config-data") pod "3f204ba0-4972-4e50-9c21-e9639ef73ff3" (UID: "3f204ba0-4972-4e50-9c21-e9639ef73ff3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.088770 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.097495 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.110953 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-internal-tls-certs\") pod \"b9df2172-145d-4edd-8d1c-7cc6768840bb\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.111017 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-config-data\") pod \"b9df2172-145d-4edd-8d1c-7cc6768840bb\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.111371 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-public-tls-certs\") pod \"b9df2172-145d-4edd-8d1c-7cc6768840bb\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.111540 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9zwk\" (UniqueName: \"kubernetes.io/projected/b9df2172-145d-4edd-8d1c-7cc6768840bb-kube-api-access-k9zwk\") pod \"b9df2172-145d-4edd-8d1c-7cc6768840bb\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.111616 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9df2172-145d-4edd-8d1c-7cc6768840bb-logs\") pod \"b9df2172-145d-4edd-8d1c-7cc6768840bb\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.111643 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-combined-ca-bundle\") pod \"b9df2172-145d-4edd-8d1c-7cc6768840bb\" (UID: \"b9df2172-145d-4edd-8d1c-7cc6768840bb\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.112513 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.112532 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.112541 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkz9l\" (UniqueName: \"kubernetes.io/projected/3f204ba0-4972-4e50-9c21-e9639ef73ff3-kube-api-access-vkz9l\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.112552 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.112562 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.112570 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f204ba0-4972-4e50-9c21-e9639ef73ff3-logs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.121228 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9df2172-145d-4edd-8d1c-7cc6768840bb-logs" (OuterVolumeSpecName: "logs") pod "b9df2172-145d-4edd-8d1c-7cc6768840bb" (UID: "b9df2172-145d-4edd-8d1c-7cc6768840bb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.149999 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9df2172-145d-4edd-8d1c-7cc6768840bb-kube-api-access-k9zwk" (OuterVolumeSpecName: "kube-api-access-k9zwk") pod "b9df2172-145d-4edd-8d1c-7cc6768840bb" (UID: "b9df2172-145d-4edd-8d1c-7cc6768840bb"). InnerVolumeSpecName "kube-api-access-k9zwk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.151806 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_cdeb3ae9-0105-40e4-889d-7d9ab0be4427/ovn-northd/0.log"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.151875 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.153257 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c27d0b15-3660-4d2c-b5f1-89392d93317f","Type":"ContainerDied","Data":"9f069aa04f88b7c5ee56bbc9eb45c7a4186e19669a71c1711a468517600b7000"}
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.153297 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f069aa04f88b7c5ee56bbc9eb45c7a4186e19669a71c1711a468517600b7000"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.158069 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3f204ba0-4972-4e50-9c21-e9639ef73ff3" (UID: "3f204ba0-4972-4e50-9c21-e9639ef73ff3"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.159444 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.160424 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9df2172-145d-4edd-8d1c-7cc6768840bb" (UID: "b9df2172-145d-4edd-8d1c-7cc6768840bb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.160957 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.162512 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-config-data" (OuterVolumeSpecName: "config-data") pod "b9df2172-145d-4edd-8d1c-7cc6768840bb" (UID: "b9df2172-145d-4edd-8d1c-7cc6768840bb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.173772 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"67675e2f-3a2b-4552-bbd5-c12b3ba3a505","Type":"ContainerDied","Data":"510292bf20b18a58473d8b589500d70c5a7346b2e80c96773be02224365a7c29"}
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.173822 4787 scope.go:117] "RemoveContainer" containerID="e1a61b54bf10478ca80351b015706e154501c6dbfad962662f03a6e51dfe02bb"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.173930 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.182539 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b9df2172-145d-4edd-8d1c-7cc6768840bb" (UID: "b9df2172-145d-4edd-8d1c-7cc6768840bb"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.184924 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_cdeb3ae9-0105-40e4-889d-7d9ab0be4427/ovn-northd/0.log"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.184986 4787 generic.go:334] "Generic (PLEG): container finished" podID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerID="f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0" exitCode=139
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.185083 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cdeb3ae9-0105-40e4-889d-7d9ab0be4427","Type":"ContainerDied","Data":"f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0"}
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.185126 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"cdeb3ae9-0105-40e4-889d-7d9ab0be4427","Type":"ContainerDied","Data":"bb8343c68390c1419fd28be0f24f8130ffd16f339fefc6537ba09f0aebc484bc"}
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.185224 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.185846 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b9df2172-145d-4edd-8d1c-7cc6768840bb" (UID: "b9df2172-145d-4edd-8d1c-7cc6768840bb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.193774 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5687c787c6-cdl5t"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.193626 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5687c787c6-cdl5t" event={"ID":"3f204ba0-4972-4e50-9c21-e9639ef73ff3","Type":"ContainerDied","Data":"4ff24cdac41bb221041b39bdc8cfeb3cb5a9238340b306c6866ecc06e7a9372d"}
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.197101 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d95df36d-a737-4136-8921-01fe4e028add","Type":"ContainerDied","Data":"838949beaab6d850d64bfa417ae4b78437f8aff3a1873bad8750a4b6d346a500"}
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.197195 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.204134 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b9df2172-145d-4edd-8d1c-7cc6768840bb","Type":"ContainerDied","Data":"a3663a69244dbbe2eff33d12bc6d3fd8afdfbea1552c58cf5b4e7d12553ee84e"}
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.204223 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.217869 4787 scope.go:117] "RemoveContainer" containerID="2f63dfa9d7bf21cc31d7b9d8c380ef9fbae854b8b7d202f4e7c02c9ab75414bb"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219154 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-scripts\") pod \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219231 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-metrics-certs-tls-certs\") pod \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219279 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-public-tls-certs\") pod \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219312 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-scripts\") pod \"c27d0b15-3660-4d2c-b5f1-89392d93317f\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219350 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-config\") pod \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219517 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5q4f\" (UniqueName: \"kubernetes.io/projected/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-kube-api-access-x5q4f\") pod \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219558 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nwwz\" (UniqueName: \"kubernetes.io/projected/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-kube-api-access-8nwwz\") pod \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219587 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219653 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-config-data\") pod \"c27d0b15-3660-4d2c-b5f1-89392d93317f\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219696 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-scripts\") pod \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.219737 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-combined-ca-bundle\") pod \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.221107 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-combined-ca-bundle\") pod \"c27d0b15-3660-4d2c-b5f1-89392d93317f\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.221788 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-northd-tls-certs\") pod \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.221871 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-log-httpd\") pod \"c27d0b15-3660-4d2c-b5f1-89392d93317f\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.221941 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-rundir\") pod \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\" (UID: \"cdeb3ae9-0105-40e4-889d-7d9ab0be4427\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.222238 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-run-httpd\") pod \"c27d0b15-3660-4d2c-b5f1-89392d93317f\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.222286 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-logs\") pod \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.222326 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-ceilometer-tls-certs\") pod \"c27d0b15-3660-4d2c-b5f1-89392d93317f\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.222362 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-sg-core-conf-yaml\") pod \"c27d0b15-3660-4d2c-b5f1-89392d93317f\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.222639 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-config-data\") pod \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.222677 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-httpd-run\") pod \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.222719 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-combined-ca-bundle\") pod \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\" (UID: \"67675e2f-3a2b-4552-bbd5-c12b3ba3a505\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.222757 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gngkq\" (UniqueName: \"kubernetes.io/projected/c27d0b15-3660-4d2c-b5f1-89392d93317f-kube-api-access-gngkq\") pod \"c27d0b15-3660-4d2c-b5f1-89392d93317f\" (UID: \"c27d0b15-3660-4d2c-b5f1-89392d93317f\") "
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.232298 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-config" (OuterVolumeSpecName: "config") pod "cdeb3ae9-0105-40e4-889d-7d9ab0be4427" (UID: "cdeb3ae9-0105-40e4-889d-7d9ab0be4427"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.239314 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.239344 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.239359 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-config\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.239378 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f204ba0-4972-4e50-9c21-e9639ef73ff3-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.239391 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9zwk\" (UniqueName: \"kubernetes.io/projected/b9df2172-145d-4edd-8d1c-7cc6768840bb-kube-api-access-k9zwk\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.239403 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9df2172-145d-4edd-8d1c-7cc6768840bb-logs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.239413 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.239428 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9df2172-145d-4edd-8d1c-7cc6768840bb-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 29 13:40:57 crc kubenswrapper[4787]: E0129 13:40:57.239523 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Jan 29 13:40:57 crc kubenswrapper[4787]: E0129 13:40:57.239568 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data podName:6285155e-2d1b-4c6f-be33-5f2681a7b5e0 nodeName:}" failed. No retries permitted until 2026-01-29 13:41:05.239553267 +0000 UTC m=+1504.000813543 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data") pod "rabbitmq-cell1-server-0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0") : configmap "rabbitmq-cell1-config-data" not found
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.241409 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-kube-api-access-x5q4f" (OuterVolumeSpecName: "kube-api-access-x5q4f") pod "67675e2f-3a2b-4552-bbd5-c12b3ba3a505" (UID: "67675e2f-3a2b-4552-bbd5-c12b3ba3a505"). InnerVolumeSpecName "kube-api-access-x5q4f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.244780 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-scripts" (OuterVolumeSpecName: "scripts") pod "cdeb3ae9-0105-40e4-889d-7d9ab0be4427" (UID: "cdeb3ae9-0105-40e4-889d-7d9ab0be4427"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.245593 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dee3-account-create-update-74nmg"
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.246445 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-logs" (OuterVolumeSpecName: "logs") pod "67675e2f-3a2b-4552-bbd5-c12b3ba3a505" (UID: "67675e2f-3a2b-4552-bbd5-c12b3ba3a505"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.249809 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "cdeb3ae9-0105-40e4-889d-7d9ab0be4427" (UID: "cdeb3ae9-0105-40e4-889d-7d9ab0be4427"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.249964 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c27d0b15-3660-4d2c-b5f1-89392d93317f" (UID: "c27d0b15-3660-4d2c-b5f1-89392d93317f"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.252615 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-scripts" (OuterVolumeSpecName: "scripts") pod "67675e2f-3a2b-4552-bbd5-c12b3ba3a505" (UID: "67675e2f-3a2b-4552-bbd5-c12b3ba3a505"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.252711 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f73803d0-ec9b-4483-a509-7bff9afb1d85","Type":"ContainerDied","Data":"3c4253f6e6cfc2c7c17384367dcdcce3e84c35a4fae868a2f8beccc85a75f330"} Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.252892 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.254491 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c27d0b15-3660-4d2c-b5f1-89392d93317f" (UID: "c27d0b15-3660-4d2c-b5f1-89392d93317f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.263124 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "67675e2f-3a2b-4552-bbd5-c12b3ba3a505" (UID: "67675e2f-3a2b-4552-bbd5-c12b3ba3a505"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.265801 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-kube-api-access-8nwwz" (OuterVolumeSpecName: "kube-api-access-8nwwz") pod "cdeb3ae9-0105-40e4-889d-7d9ab0be4427" (UID: "cdeb3ae9-0105-40e4-889d-7d9ab0be4427"). InnerVolumeSpecName "kube-api-access-8nwwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.269226 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c27d0b15-3660-4d2c-b5f1-89392d93317f-kube-api-access-gngkq" (OuterVolumeSpecName: "kube-api-access-gngkq") pod "c27d0b15-3660-4d2c-b5f1-89392d93317f" (UID: "c27d0b15-3660-4d2c-b5f1-89392d93317f"). InnerVolumeSpecName "kube-api-access-gngkq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.270015 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "67675e2f-3a2b-4552-bbd5-c12b3ba3a505" (UID: "67675e2f-3a2b-4552-bbd5-c12b3ba3a505"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.293118 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cdeb3ae9-0105-40e4-889d-7d9ab0be4427" (UID: "cdeb3ae9-0105-40e4-889d-7d9ab0be4427"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.296583 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-scripts" (OuterVolumeSpecName: "scripts") pod "c27d0b15-3660-4d2c-b5f1-89392d93317f" (UID: "c27d0b15-3660-4d2c-b5f1-89392d93317f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.324851 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "67675e2f-3a2b-4552-bbd5-c12b3ba3a505" (UID: "67675e2f-3a2b-4552-bbd5-c12b3ba3a505"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.331832 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "67675e2f-3a2b-4552-bbd5-c12b3ba3a505" (UID: "67675e2f-3a2b-4552-bbd5-c12b3ba3a505"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.340677 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.340836 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.340912 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.340969 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5q4f\" (UniqueName: \"kubernetes.io/projected/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-kube-api-access-x5q4f\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341019 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8nwwz\" (UniqueName: \"kubernetes.io/projected/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-kube-api-access-8nwwz\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341080 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341138 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341189 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341239 4787 
reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341287 4787 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341336 4787 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c27d0b15-3660-4d2c-b5f1-89392d93317f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341384 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341437 4787 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.341504 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.344855 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gngkq\" (UniqueName: \"kubernetes.io/projected/c27d0b15-3660-4d2c-b5f1-89392d93317f-kube-api-access-gngkq\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.343516 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c27d0b15-3660-4d2c-b5f1-89392d93317f" (UID: "c27d0b15-3660-4d2c-b5f1-89392d93317f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.360355 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c27d0b15-3660-4d2c-b5f1-89392d93317f" (UID: "c27d0b15-3660-4d2c-b5f1-89392d93317f"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.367129 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.371374 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c27d0b15-3660-4d2c-b5f1-89392d93317f" (UID: "c27d0b15-3660-4d2c-b5f1-89392d93317f"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.373681 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lsntj"] Jan 29 13:40:57 crc kubenswrapper[4787]: W0129 13:40:57.383763 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod142aa03d_77a5_40d1_a99f_8d7d33338721.slice/crio-d582e156dd6426194ee12881765ec0dc4c67374240c4cccb9b2ba9289543a024 WatchSource:0}: Error finding container d582e156dd6426194ee12881765ec0dc4c67374240c4cccb9b2ba9289543a024: Status 404 returned error can't find the container with id d582e156dd6426194ee12881765ec0dc4c67374240c4cccb9b2ba9289543a024 Jan 29 13:40:57 crc kubenswrapper[4787]: E0129 13:40:57.385999 4787 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 29 13:40:57 crc kubenswrapper[4787]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 29 13:40:57 crc kubenswrapper[4787]: Jan 29 13:40:57 crc kubenswrapper[4787]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 29 13:40:57 crc kubenswrapper[4787]: Jan 29 13:40:57 crc kubenswrapper[4787]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 29 13:40:57 crc kubenswrapper[4787]: Jan 29 13:40:57 crc kubenswrapper[4787]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 29 13:40:57 crc kubenswrapper[4787]: Jan 29 13:40:57 crc kubenswrapper[4787]: if [ -n "" ]; then Jan 29 13:40:57 crc kubenswrapper[4787]: GRANT_DATABASE="" Jan 29 13:40:57 crc kubenswrapper[4787]: else Jan 29 13:40:57 crc kubenswrapper[4787]: GRANT_DATABASE="*" Jan 29 13:40:57 crc kubenswrapper[4787]: fi Jan 29 13:40:57 crc kubenswrapper[4787]: Jan 29 13:40:57 crc kubenswrapper[4787]: # going for maximum compatibility here: Jan 29 13:40:57 crc kubenswrapper[4787]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 29 13:40:57 crc kubenswrapper[4787]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 29 13:40:57 crc kubenswrapper[4787]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 29 13:40:57 crc kubenswrapper[4787]: # support updates Jan 29 13:40:57 crc kubenswrapper[4787]: Jan 29 13:40:57 crc kubenswrapper[4787]: $MYSQL_CMD < logger="UnhandledError" Jan 29 13:40:57 crc kubenswrapper[4787]: E0129 13:40:57.388814 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-lsntj" podUID="142aa03d-77a5-40d1-a99f-8d7d33338721" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.408359 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-config-data" (OuterVolumeSpecName: "config-data") pod "67675e2f-3a2b-4552-bbd5-c12b3ba3a505" (UID: "67675e2f-3a2b-4552-bbd5-c12b3ba3a505"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.408765 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "cdeb3ae9-0105-40e4-889d-7d9ab0be4427" (UID: "cdeb3ae9-0105-40e4-889d-7d9ab0be4427"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.419111 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "cdeb3ae9-0105-40e4-889d-7d9ab0be4427" (UID: "cdeb3ae9-0105-40e4-889d-7d9ab0be4427"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.448435 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67675e2f-3a2b-4552-bbd5-c12b3ba3a505-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.448487 4787 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.448501 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.448515 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.448523 4787 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/cdeb3ae9-0105-40e4-889d-7d9ab0be4427-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.448531 4787 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.448542 4787 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.476099 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-config-data" (OuterVolumeSpecName: "config-data") pod "c27d0b15-3660-4d2c-b5f1-89392d93317f" (UID: "c27d0b15-3660-4d2c-b5f1-89392d93317f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.517373 4787 scope.go:117] "RemoveContainer" containerID="453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.535746 4787 scope.go:117] "RemoveContainer" containerID="f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.549527 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c27d0b15-3660-4d2c-b5f1-89392d93317f-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.563556 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5687c787c6-cdl5t"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.569874 4787 scope.go:117] "RemoveContainer" containerID="453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.571929 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5687c787c6-cdl5t"] Jan 29 13:40:57 crc kubenswrapper[4787]: E0129 13:40:57.575632 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b\": container with ID starting with 453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b not found: ID does not exist" containerID="453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.575665 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b"} err="failed to get container status \"453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b\": rpc error: code = NotFound desc = could not find container \"453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b\": container with ID starting with 453155f631eb2a89d340e0a82689807aaebd51d8c85d2ebc662973ecae84e47b not found: ID does not exist" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.575687 4787 scope.go:117] "RemoveContainer" containerID="f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0" Jan 29 13:40:57 crc kubenswrapper[4787]: E0129 13:40:57.576073 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0\": container with ID starting with f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0 not found: ID does not exist" containerID="f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.576092 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0"} err="failed to get container status \"f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0\": rpc error: code = NotFound desc = could not find container \"f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0\": container with ID starting with f18a0b85c268ecbe456a9e70d1eb937455250d8732ff3f7b16c4d3fb6422d8e0 not found: ID does not exist" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.576105 4787 
scope.go:117] "RemoveContainer" containerID="483fbd8d88259f502b0982cdbf412937c30e55091fb48778417b91b9a155bfca" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.578014 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.586418 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.606729 4787 scope.go:117] "RemoveContainer" containerID="1409e2db8a0ffd4d94c91a11022596ab339a612fb26e6871a45ae7940cf15a10" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.606756 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.618004 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.641695 4787 scope.go:117] "RemoveContainer" containerID="2e159ae76f0bb63f2124cb8a5615db9a9eac4b38c6806f2dcf9a137b01700373" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.646644 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dee3-account-create-update-74nmg"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.649353 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-dee3-account-create-update-74nmg"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.655480 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.658565 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.663366 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.669861 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.675854 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.680174 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.683689 4787 scope.go:117] "RemoveContainer" containerID="083f46373caf16e71650bd1e4ebee2fe1d02f7cb3f599bbb6f51f4683a6a4fa6" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.712130 4787 scope.go:117] "RemoveContainer" containerID="8c54aa6658f396298b1bba542f62e319b2dd09d1a2963008ba09c2366a51988b" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.751519 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/be423fe7-4020-41fe-90fc-bd42f5cec5db-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.751555 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zm7bf\" (UniqueName: \"kubernetes.io/projected/be423fe7-4020-41fe-90fc-bd42f5cec5db-kube-api-access-zm7bf\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:57 crc kubenswrapper[4787]: I0129 13:40:57.763076 4787 scope.go:117] "RemoveContainer" containerID="8176461a98299b76193219bfaaced8b08f48e5e770c550736d32af3741ce5884" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.010605 4787 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="05668608-1ac5-4376-b0ef-0ec5604136a1" path="/var/lib/kubelet/pods/05668608-1ac5-4376-b0ef-0ec5604136a1/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.012758 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" path="/var/lib/kubelet/pods/1287d5ec-d072-43ba-b553-6d2d229b7c6c/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.013661 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" path="/var/lib/kubelet/pods/383ed8f7-22dd-49b6-a932-6425cc62a6d1/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.019655 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" path="/var/lib/kubelet/pods/3f204ba0-4972-4e50-9c21-e9639ef73ff3/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.020520 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" path="/var/lib/kubelet/pods/4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.022180 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56183615-9f6d-4fc8-8ff9-4856929e5d28" path="/var/lib/kubelet/pods/56183615-9f6d-4fc8-8ff9-4856929e5d28/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.023718 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" path="/var/lib/kubelet/pods/67675e2f-3a2b-4552-bbd5-c12b3ba3a505/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.024537 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69319341-ad07-4fcc-a65a-585f389382ab" path="/var/lib/kubelet/pods/69319341-ad07-4fcc-a65a-585f389382ab/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.024954 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6caca38d-9421-4a65-8e5e-ddc0343460c2" path="/var/lib/kubelet/pods/6caca38d-9421-4a65-8e5e-ddc0343460c2/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.025439 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" path="/var/lib/kubelet/pods/87eff82d-823f-44a9-b96b-fed35701c54b/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.027402 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" path="/var/lib/kubelet/pods/93f58b7a-13c3-49ef-8c78-a5931438cba6/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.029062 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" path="/var/lib/kubelet/pods/b9df2172-145d-4edd-8d1c-7cc6768840bb/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.030404 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be423fe7-4020-41fe-90fc-bd42f5cec5db" path="/var/lib/kubelet/pods/be423fe7-4020-41fe-90fc-bd42f5cec5db/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.030873 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" path="/var/lib/kubelet/pods/cdeb3ae9-0105-40e4-889d-7d9ab0be4427/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.031497 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" path="/var/lib/kubelet/pods/d21a5fb3-2d4b-4b53-8fe6-45fe636362b4/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.032069 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d95df36d-a737-4136-8921-01fe4e028add" path="/var/lib/kubelet/pods/d95df36d-a737-4136-8921-01fe4e028add/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.033172 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6256c29-7af4-4921-b494-ef3a2e5e976f" path="/var/lib/kubelet/pods/e6256c29-7af4-4921-b494-ef3a2e5e976f/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.033567 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f73803d0-ec9b-4483-a509-7bff9afb1d85" path="/var/lib/kubelet/pods/f73803d0-ec9b-4483-a509-7bff9afb1d85/volumes" Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.072792 4787 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6285155e_2d1b_4c6f_be33_5f2681a7b5e0.slice/crio-19173a801f2b7b195813a7a47563e2f75b1704c2dc99324f6bc865c02b1775b4.scope\": RecentStats: unable to find data in memory cache]" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.145656 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256209 4787 generic.go:334] "Generic (PLEG): container finished" podID="7b3f405a-2fa1-4afe-8364-60489fc271ca" containerID="a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72" exitCode=0 Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256250 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256697 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-galera-tls-certs\") pod \"7b3f405a-2fa1-4afe-8364-60489fc271ca\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256745 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-kolla-config\") pod \"7b3f405a-2fa1-4afe-8364-60489fc271ca\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256777 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-combined-ca-bundle\") pod \"7b3f405a-2fa1-4afe-8364-60489fc271ca\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256863 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l944v\" (UniqueName: \"kubernetes.io/projected/7b3f405a-2fa1-4afe-8364-60489fc271ca-kube-api-access-l944v\") pod \"7b3f405a-2fa1-4afe-8364-60489fc271ca\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256893 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-generated\") pod \"7b3f405a-2fa1-4afe-8364-60489fc271ca\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256925 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"7b3f405a-2fa1-4afe-8364-60489fc271ca\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256947 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-operator-scripts\") pod \"7b3f405a-2fa1-4afe-8364-60489fc271ca\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256968 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-default\") pod \"7b3f405a-2fa1-4afe-8364-60489fc271ca\" (UID: \"7b3f405a-2fa1-4afe-8364-60489fc271ca\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.257753 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7b3f405a-2fa1-4afe-8364-60489fc271ca" (UID: "7b3f405a-2fa1-4afe-8364-60489fc271ca"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.257760 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "7b3f405a-2fa1-4afe-8364-60489fc271ca" (UID: "7b3f405a-2fa1-4afe-8364-60489fc271ca"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.258057 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "7b3f405a-2fa1-4afe-8364-60489fc271ca" (UID: "7b3f405a-2fa1-4afe-8364-60489fc271ca"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.258162 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "7b3f405a-2fa1-4afe-8364-60489fc271ca" (UID: "7b3f405a-2fa1-4afe-8364-60489fc271ca"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.256265 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b3f405a-2fa1-4afe-8364-60489fc271ca","Type":"ContainerDied","Data":"a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72"} Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.258282 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7b3f405a-2fa1-4afe-8364-60489fc271ca","Type":"ContainerDied","Data":"3031543447d738394c139cd3721f31533505ba3717a0f738e40b447dd668095d"} Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.258317 4787 scope.go:117] "RemoveContainer" containerID="a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.262074 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b3f405a-2fa1-4afe-8364-60489fc271ca-kube-api-access-l944v" (OuterVolumeSpecName: "kube-api-access-l944v") pod "7b3f405a-2fa1-4afe-8364-60489fc271ca" (UID: "7b3f405a-2fa1-4afe-8364-60489fc271ca"). InnerVolumeSpecName "kube-api-access-l944v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.280154 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "mysql-db") pod "7b3f405a-2fa1-4afe-8364-60489fc271ca" (UID: "7b3f405a-2fa1-4afe-8364-60489fc271ca"). InnerVolumeSpecName "local-storage09-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.280695 4787 generic.go:334] "Generic (PLEG): container finished" podID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" containerID="19173a801f2b7b195813a7a47563e2f75b1704c2dc99324f6bc865c02b1775b4" exitCode=0 Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.280788 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6285155e-2d1b-4c6f-be33-5f2681a7b5e0","Type":"ContainerDied","Data":"19173a801f2b7b195813a7a47563e2f75b1704c2dc99324f6bc865c02b1775b4"} Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.285780 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b3f405a-2fa1-4afe-8364-60489fc271ca" (UID: "7b3f405a-2fa1-4afe-8364-60489fc271ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.285842 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.285899 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lsntj" event={"ID":"142aa03d-77a5-40d1-a99f-8d7d33338721","Type":"ContainerStarted","Data":"d582e156dd6426194ee12881765ec0dc4c67374240c4cccb9b2ba9289543a024"} Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.348606 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "7b3f405a-2fa1-4afe-8364-60489fc271ca" (UID: "7b3f405a-2fa1-4afe-8364-60489fc271ca"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.365376 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l944v\" (UniqueName: \"kubernetes.io/projected/7b3f405a-2fa1-4afe-8364-60489fc271ca-kube-api-access-l944v\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.365405 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.365434 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.365445 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.365474 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.365485 4787 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.365492 4787 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7b3f405a-2fa1-4afe-8364-60489fc271ca-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.365500 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b3f405a-2fa1-4afe-8364-60489fc271ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.386086 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.394247 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.394316 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.394369 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.395124 4787 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.395191 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" gracePeriod=600 Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.438048 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.438977 4787 scope.go:117] "RemoveContainer" containerID="9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.447577 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.452417 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.454626 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/keystone-696b9bdfd-pnnmf" podUID="3d1018e7-6cf6-4c3e-b351-6249e795620d" containerName="keystone-api" probeResult="failure" output="Get \"https://10.217.0.152:5000/v3\": read tcp 10.217.0.2:37236->10.217.0.152:5000: read: connection reset by peer" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.466956 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.467050 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.467137 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data podName:a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5 nodeName:}" failed. No retries permitted until 2026-01-29 13:41:06.467120054 +0000 UTC m=+1505.228380330 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data") pod "rabbitmq-server-0" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5") : configmap "rabbitmq-config-data" not found Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.492779 4787 scope.go:117] "RemoveContainer" containerID="a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72" Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.507268 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72\": container with ID starting with a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72 not found: ID does not exist" containerID="a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.507332 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72"} err="failed to get container status \"a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72\": rpc error: code = NotFound desc = could not find container \"a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72\": container with ID starting with a7fbb0b90f4ffc470c7f3a88b8e4a9850d2adf4b1a5bd1ef0347c97713578d72 not found: ID does not exist" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.507360 4787 scope.go:117] "RemoveContainer" containerID="9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275" Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.513025 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275\": container with ID starting with 9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275 not found: ID does not exist" containerID="9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.513187 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275"} err="failed to get container status \"9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275\": rpc error: code = NotFound desc = could not find container \"9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275\": container with ID starting with 9dbee67285cc4fbb582c6d856a8f804d465945b7523593148d81a2214bc14275 not found: ID does not exist" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567526 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-erlang-cookie\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567591 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-pod-info\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567628 4787 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567676 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mfc5\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-kube-api-access-7mfc5\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567714 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-server-conf\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567812 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567870 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-plugins-conf\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567890 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-tls\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.567967 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-plugins\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.568006 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-erlang-cookie-secret\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.568036 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-confd\") pod \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\" (UID: \"6285155e-2d1b-4c6f-be33-5f2681a7b5e0\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.568312 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.569132 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.569533 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.573189 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.573724 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.575792 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.575858 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-pod-info" (OuterVolumeSpecName: "pod-info") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.583807 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-kube-api-access-7mfc5" (OuterVolumeSpecName: "kube-api-access-7mfc5") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "kube-api-access-7mfc5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.591138 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.597846 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.597974 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.610777 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data" (OuterVolumeSpecName: "config-data") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.618207 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-server-conf" (OuterVolumeSpecName: "server-conf") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.654718 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "6285155e-2d1b-4c6f-be33-5f2681a7b5e0" (UID: "6285155e-2d1b-4c6f-be33-5f2681a7b5e0"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669202 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c92wl\" (UniqueName: \"kubernetes.io/projected/142aa03d-77a5-40d1-a99f-8d7d33338721-kube-api-access-c92wl\") pod \"142aa03d-77a5-40d1-a99f-8d7d33338721\" (UID: \"142aa03d-77a5-40d1-a99f-8d7d33338721\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669278 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/142aa03d-77a5-40d1-a99f-8d7d33338721-operator-scripts\") pod \"142aa03d-77a5-40d1-a99f-8d7d33338721\" (UID: \"142aa03d-77a5-40d1-a99f-8d7d33338721\") " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669674 4787 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669694 4787 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669703 4787 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669713 4787 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669721 4787 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669729 4787 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669740 4787 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-pod-info\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669758 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669767 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mfc5\" (UniqueName: \"kubernetes.io/projected/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-kube-api-access-7mfc5\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669776 4787 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-server-conf\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669784 4787 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6285155e-2d1b-4c6f-be33-5f2681a7b5e0-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.669872 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/142aa03d-77a5-40d1-a99f-8d7d33338721-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "142aa03d-77a5-40d1-a99f-8d7d33338721" (UID: "142aa03d-77a5-40d1-a99f-8d7d33338721"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.672504 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/142aa03d-77a5-40d1-a99f-8d7d33338721-kube-api-access-c92wl" (OuterVolumeSpecName: "kube-api-access-c92wl") pod "142aa03d-77a5-40d1-a99f-8d7d33338721" (UID: "142aa03d-77a5-40d1-a99f-8d7d33338721"). InnerVolumeSpecName "kube-api-access-c92wl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.675122 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.675606 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.681565 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.681602 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.681918 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.685748 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.685838 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.694609 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:40:58 crc kubenswrapper[4787]: E0129 13:40:58.695014 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.694796 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.771445 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.771495 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c92wl\" (UniqueName: \"kubernetes.io/projected/142aa03d-77a5-40d1-a99f-8d7d33338721-kube-api-access-c92wl\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.771507 4787 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/142aa03d-77a5-40d1-a99f-8d7d33338721-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.823818 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.168:8080/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 13:40:58 crc kubenswrapper[4787]: I0129 13:40:58.823848 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-77bffb9b6f-5z6t5" podUID="ec204729-6346-4c3a-a479-2a2aa58eb3bc" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.168:8080/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.069169 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.177178 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-scripts\") pod \"3d1018e7-6cf6-4c3e-b351-6249e795620d\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.177216 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2k2n\" (UniqueName: \"kubernetes.io/projected/3d1018e7-6cf6-4c3e-b351-6249e795620d-kube-api-access-s2k2n\") pod \"3d1018e7-6cf6-4c3e-b351-6249e795620d\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.177242 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-public-tls-certs\") pod \"3d1018e7-6cf6-4c3e-b351-6249e795620d\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.177268 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-credential-keys\") pod \"3d1018e7-6cf6-4c3e-b351-6249e795620d\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.177292 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-fernet-keys\") pod \"3d1018e7-6cf6-4c3e-b351-6249e795620d\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.177311 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-internal-tls-certs\") pod \"3d1018e7-6cf6-4c3e-b351-6249e795620d\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.177334 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-combined-ca-bundle\") pod \"3d1018e7-6cf6-4c3e-b351-6249e795620d\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.177376 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-config-data\") pod \"3d1018e7-6cf6-4c3e-b351-6249e795620d\" (UID: \"3d1018e7-6cf6-4c3e-b351-6249e795620d\") " Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.181402 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d1018e7-6cf6-4c3e-b351-6249e795620d-kube-api-access-s2k2n" (OuterVolumeSpecName: "kube-api-access-s2k2n") pod "3d1018e7-6cf6-4c3e-b351-6249e795620d" (UID: "3d1018e7-6cf6-4c3e-b351-6249e795620d"). InnerVolumeSpecName "kube-api-access-s2k2n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.181636 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3d1018e7-6cf6-4c3e-b351-6249e795620d" (UID: "3d1018e7-6cf6-4c3e-b351-6249e795620d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.181755 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3d1018e7-6cf6-4c3e-b351-6249e795620d" (UID: "3d1018e7-6cf6-4c3e-b351-6249e795620d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.182467 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-scripts" (OuterVolumeSpecName: "scripts") pod "3d1018e7-6cf6-4c3e-b351-6249e795620d" (UID: "3d1018e7-6cf6-4c3e-b351-6249e795620d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.200485 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d1018e7-6cf6-4c3e-b351-6249e795620d" (UID: "3d1018e7-6cf6-4c3e-b351-6249e795620d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.216373 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-config-data" (OuterVolumeSpecName: "config-data") pod "3d1018e7-6cf6-4c3e-b351-6249e795620d" (UID: "3d1018e7-6cf6-4c3e-b351-6249e795620d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.220036 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3d1018e7-6cf6-4c3e-b351-6249e795620d" (UID: "3d1018e7-6cf6-4c3e-b351-6249e795620d"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.242847 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3d1018e7-6cf6-4c3e-b351-6249e795620d" (UID: "3d1018e7-6cf6-4c3e-b351-6249e795620d"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.279702 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.279726 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2k2n\" (UniqueName: \"kubernetes.io/projected/3d1018e7-6cf6-4c3e-b351-6249e795620d-kube-api-access-s2k2n\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.279737 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.279747 4787 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.279755 4787 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.279764 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.279772 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.279779 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d1018e7-6cf6-4c3e-b351-6249e795620d-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.305574 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6285155e-2d1b-4c6f-be33-5f2681a7b5e0","Type":"ContainerDied","Data":"a477b250288ea993e4f89842a04898f7f4cf6530724bf70b13e4d844bd478e04"} Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.305606 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.305633 4787 scope.go:117] "RemoveContainer" containerID="19173a801f2b7b195813a7a47563e2f75b1704c2dc99324f6bc865c02b1775b4" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.314614 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-lsntj" event={"ID":"142aa03d-77a5-40d1-a99f-8d7d33338721","Type":"ContainerDied","Data":"d582e156dd6426194ee12881765ec0dc4c67374240c4cccb9b2ba9289543a024"} Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.314678 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-lsntj" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.326563 4787 generic.go:334] "Generic (PLEG): container finished" podID="3d1018e7-6cf6-4c3e-b351-6249e795620d" containerID="5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b" exitCode=0 Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.326609 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-696b9bdfd-pnnmf" event={"ID":"3d1018e7-6cf6-4c3e-b351-6249e795620d","Type":"ContainerDied","Data":"5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b"} Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.326627 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-696b9bdfd-pnnmf" event={"ID":"3d1018e7-6cf6-4c3e-b351-6249e795620d","Type":"ContainerDied","Data":"cf3d3432082d77bfa118e1d58ea109e9770af02e106143d233074811c87afdc2"} Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.329929 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-696b9bdfd-pnnmf" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.351197 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" exitCode=0 Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.351230 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"} Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.351665 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:40:59 crc kubenswrapper[4787]: E0129 13:40:59.351882 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.377170 4787 scope.go:117] "RemoveContainer" containerID="a6c0ad3143ab1f25e2f8fefaf9710ebb5e0dda180c2744c012d09871106cf7a3" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.431226 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-lsntj"] Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.432403 4787 scope.go:117] "RemoveContainer" containerID="5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.445887 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-lsntj"] Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.479605 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-696b9bdfd-pnnmf"] Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.493913 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-696b9bdfd-pnnmf"] Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.496971 4787 scope.go:117] "RemoveContainer" 
containerID="5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b" Jan 29 13:40:59 crc kubenswrapper[4787]: E0129 13:40:59.498896 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b\": container with ID starting with 5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b not found: ID does not exist" containerID="5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.498937 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b"} err="failed to get container status \"5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b\": rpc error: code = NotFound desc = could not find container \"5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b\": container with ID starting with 5e49a792bd15d202e00c9c20d2a5c850f776285412b7c08a5fb357235f81996b not found: ID does not exist" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.498965 4787 scope.go:117] "RemoveContainer" containerID="845d9853bc8431c6707c4ecc6659b35b630563236beea9cc4f52a93c1c065e94" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.501316 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.508494 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.995898 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="142aa03d-77a5-40d1-a99f-8d7d33338721" path="/var/lib/kubelet/pods/142aa03d-77a5-40d1-a99f-8d7d33338721/volumes" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.996581 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d1018e7-6cf6-4c3e-b351-6249e795620d" path="/var/lib/kubelet/pods/3d1018e7-6cf6-4c3e-b351-6249e795620d/volumes" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.997256 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" path="/var/lib/kubelet/pods/6285155e-2d1b-4c6f-be33-5f2681a7b5e0/volumes" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.998339 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" path="/var/lib/kubelet/pods/7b3f405a-2fa1-4afe-8364-60489fc271ca/volumes" Jan 29 13:40:59 crc kubenswrapper[4787]: I0129 13:40:59.998975 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" path="/var/lib/kubelet/pods/c27d0b15-3660-4d2c-b5f1-89392d93317f/volumes" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.187369 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.211740 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.299447 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data-custom\") pod \"00f5493b-e570-4684-b7ae-9af7154b3e51\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.299528 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e48c5bf-c285-446e-a91e-fe216f819f05-logs\") pod \"0e48c5bf-c285-446e-a91e-fe216f819f05\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.299560 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-combined-ca-bundle\") pod \"0e48c5bf-c285-446e-a91e-fe216f819f05\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.299589 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffn97\" (UniqueName: \"kubernetes.io/projected/00f5493b-e570-4684-b7ae-9af7154b3e51-kube-api-access-ffn97\") pod \"00f5493b-e570-4684-b7ae-9af7154b3e51\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.299677 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccz8k\" (UniqueName: \"kubernetes.io/projected/0e48c5bf-c285-446e-a91e-fe216f819f05-kube-api-access-ccz8k\") pod \"0e48c5bf-c285-446e-a91e-fe216f819f05\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.299815 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data\") pod \"0e48c5bf-c285-446e-a91e-fe216f819f05\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.300201 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-combined-ca-bundle\") pod \"00f5493b-e570-4684-b7ae-9af7154b3e51\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.300264 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f5493b-e570-4684-b7ae-9af7154b3e51-logs\") pod \"00f5493b-e570-4684-b7ae-9af7154b3e51\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.300293 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data-custom\") pod \"0e48c5bf-c285-446e-a91e-fe216f819f05\" (UID: \"0e48c5bf-c285-446e-a91e-fe216f819f05\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.300312 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data\") pod 
\"00f5493b-e570-4684-b7ae-9af7154b3e51\" (UID: \"00f5493b-e570-4684-b7ae-9af7154b3e51\") " Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.301675 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e48c5bf-c285-446e-a91e-fe216f819f05-logs" (OuterVolumeSpecName: "logs") pod "0e48c5bf-c285-446e-a91e-fe216f819f05" (UID: "0e48c5bf-c285-446e-a91e-fe216f819f05"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.301955 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00f5493b-e570-4684-b7ae-9af7154b3e51-logs" (OuterVolumeSpecName: "logs") pod "00f5493b-e570-4684-b7ae-9af7154b3e51" (UID: "00f5493b-e570-4684-b7ae-9af7154b3e51"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.308536 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "00f5493b-e570-4684-b7ae-9af7154b3e51" (UID: "00f5493b-e570-4684-b7ae-9af7154b3e51"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.317126 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0e48c5bf-c285-446e-a91e-fe216f819f05" (UID: "0e48c5bf-c285-446e-a91e-fe216f819f05"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.318603 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00f5493b-e570-4684-b7ae-9af7154b3e51-kube-api-access-ffn97" (OuterVolumeSpecName: "kube-api-access-ffn97") pod "00f5493b-e570-4684-b7ae-9af7154b3e51" (UID: "00f5493b-e570-4684-b7ae-9af7154b3e51"). InnerVolumeSpecName "kube-api-access-ffn97". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.318728 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e48c5bf-c285-446e-a91e-fe216f819f05-kube-api-access-ccz8k" (OuterVolumeSpecName: "kube-api-access-ccz8k") pod "0e48c5bf-c285-446e-a91e-fe216f819f05" (UID: "0e48c5bf-c285-446e-a91e-fe216f819f05"). InnerVolumeSpecName "kube-api-access-ccz8k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.323196 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00f5493b-e570-4684-b7ae-9af7154b3e51" (UID: "00f5493b-e570-4684-b7ae-9af7154b3e51"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.359574 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e48c5bf-c285-446e-a91e-fe216f819f05" (UID: "0e48c5bf-c285-446e-a91e-fe216f819f05"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.374666 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data" (OuterVolumeSpecName: "config-data") pod "0e48c5bf-c285-446e-a91e-fe216f819f05" (UID: "0e48c5bf-c285-446e-a91e-fe216f819f05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.379150 4787 generic.go:334] "Generic (PLEG): container finished" podID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerID="01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3" exitCode=0 Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.379235 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-64fc7f548f-h8fjw" event={"ID":"0e48c5bf-c285-446e-a91e-fe216f819f05","Type":"ContainerDied","Data":"01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3"} Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.379266 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-64fc7f548f-h8fjw" event={"ID":"0e48c5bf-c285-446e-a91e-fe216f819f05","Type":"ContainerDied","Data":"35dde23ebeadb65cd16425b28f0ca3da7f087aac2a6397d4362de506f9ab09c9"} Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.379286 4787 scope.go:117] "RemoveContainer" containerID="01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.379402 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-64fc7f548f-h8fjw" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412611 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412646 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/00f5493b-e570-4684-b7ae-9af7154b3e51-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412658 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412669 4787 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412681 4787 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e48c5bf-c285-446e-a91e-fe216f819f05-logs\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412691 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412703 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffn97\" (UniqueName: 
\"kubernetes.io/projected/00f5493b-e570-4684-b7ae-9af7154b3e51-kube-api-access-ffn97\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412715 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccz8k\" (UniqueName: \"kubernetes.io/projected/0e48c5bf-c285-446e-a91e-fe216f819f05-kube-api-access-ccz8k\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.412724 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e48c5bf-c285-446e-a91e-fe216f819f05-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.461590 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data" (OuterVolumeSpecName: "config-data") pod "00f5493b-e570-4684-b7ae-9af7154b3e51" (UID: "00f5493b-e570-4684-b7ae-9af7154b3e51"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.469742 4787 generic.go:334] "Generic (PLEG): container finished" podID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerID="03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1" exitCode=0 Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.469962 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" event={"ID":"00f5493b-e570-4684-b7ae-9af7154b3e51","Type":"ContainerDied","Data":"03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1"} Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.470083 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" event={"ID":"00f5493b-e570-4684-b7ae-9af7154b3e51","Type":"ContainerDied","Data":"0ee4250ebe7ac6d2e09a58e53f3ae655d528d7e62f8962ddf297f31e95429afd"} Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.470224 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6f66c4d958-z5ntb" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.482643 4787 scope.go:117] "RemoveContainer" containerID="0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.508953 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-64fc7f548f-h8fjw"] Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.513583 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00f5493b-e570-4684-b7ae-9af7154b3e51-config-data\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.517767 4787 scope.go:117] "RemoveContainer" containerID="01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3" Jan 29 13:41:00 crc kubenswrapper[4787]: E0129 13:41:00.518148 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3\": container with ID starting with 01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3 not found: ID does not exist" containerID="01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.518179 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3"} err="failed to get container status \"01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3\": rpc error: code = NotFound desc = could not find container \"01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3\": container with ID starting with 01c2a88a26be270f11da868113eeded3e65f89cdfa90ae96c7a181c6730ee9b3 not found: ID does not exist" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.518196 4787 scope.go:117] "RemoveContainer" containerID="0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f" Jan 29 13:41:00 crc kubenswrapper[4787]: E0129 13:41:00.518362 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f\": container with ID starting with 0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f not found: ID does not exist" containerID="0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.518380 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f"} err="failed to get container status \"0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f\": rpc error: code = NotFound desc = could not find container \"0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f\": container with ID starting with 0c0f9f52675463ee063bc2ccbff274982697178c50cdd0fd332429f2de47ca5f not found: ID does not exist" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.518391 4787 scope.go:117] "RemoveContainer" containerID="03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.519203 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-64fc7f548f-h8fjw"] Jan 29 13:41:00 crc kubenswrapper[4787]: 
I0129 13:41:00.540545 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-6f66c4d958-z5ntb"] Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.543268 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-6f66c4d958-z5ntb"] Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.618419 4787 scope.go:117] "RemoveContainer" containerID="b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.767141 4787 scope.go:117] "RemoveContainer" containerID="03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1" Jan 29 13:41:00 crc kubenswrapper[4787]: E0129 13:41:00.767695 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1\": container with ID starting with 03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1 not found: ID does not exist" containerID="03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.767737 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1"} err="failed to get container status \"03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1\": rpc error: code = NotFound desc = could not find container \"03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1\": container with ID starting with 03a82e1b9bc58c12f05506f1f2ce412fc8410bb019e7100f62e0892e9b56f2b1 not found: ID does not exist" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.767771 4787 scope.go:117] "RemoveContainer" containerID="b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c" Jan 29 13:41:00 crc kubenswrapper[4787]: E0129 13:41:00.768166 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c\": container with ID starting with b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c not found: ID does not exist" containerID="b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c" Jan 29 13:41:00 crc kubenswrapper[4787]: I0129 13:41:00.768204 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c"} err="failed to get container status \"b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c\": rpc error: code = NotFound desc = could not find container \"b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c\": container with ID starting with b8b64ad75a322bc4108aad318d88c01ea3162e658978d5e1f49f941ffc2e325c not found: ID does not exist" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.004081 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" path="/var/lib/kubelet/pods/00f5493b-e570-4684-b7ae-9af7154b3e51/volumes" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.005756 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" path="/var/lib/kubelet/pods/0e48c5bf-c285-446e-a91e-fe216f819f05/volumes" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.499593 4787 generic.go:334] 
"Generic (PLEG): container finished" podID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerID="4505f4e6e319771bc3829367ee02ff72fbb90f8adde43914841d18e31337e0bb" exitCode=0 Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.499672 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85444c4b89-hx4zl" event={"ID":"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced","Type":"ContainerDied","Data":"4505f4e6e319771bc3829367ee02ff72fbb90f8adde43914841d18e31337e0bb"} Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.711864 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.759191 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-ovndb-tls-certs\") pod \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.759239 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-internal-tls-certs\") pod \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.759279 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctmzh\" (UniqueName: \"kubernetes.io/projected/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-kube-api-access-ctmzh\") pod \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.759388 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-httpd-config\") pod \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.759472 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-combined-ca-bundle\") pod \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.759507 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-public-tls-certs\") pod \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.759541 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-config\") pod \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\" (UID: \"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced\") " Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.773997 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" (UID: "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.780500 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-kube-api-access-ctmzh" (OuterVolumeSpecName: "kube-api-access-ctmzh") pod "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" (UID: "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced"). InnerVolumeSpecName "kube-api-access-ctmzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.803710 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" (UID: "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.808305 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" (UID: "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.815789 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-config" (OuterVolumeSpecName: "config") pod "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" (UID: "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.822860 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" (UID: "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.829025 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" (UID: "8169c2f6-489e-43a8-ba7a-4f8abb9f1ced"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.861118 4787 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.861149 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.861159 4787 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.861168 4787 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-config\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.861176 4787 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.861187 4787 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:02 crc kubenswrapper[4787]: I0129 13:41:02.861196 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctmzh\" (UniqueName: \"kubernetes.io/projected/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced-kube-api-access-ctmzh\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:03 crc kubenswrapper[4787]: I0129 13:41:03.533380 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85444c4b89-hx4zl" event={"ID":"8169c2f6-489e-43a8-ba7a-4f8abb9f1ced","Type":"ContainerDied","Data":"7f6eb5dee970a1f1d2da58a5bd57fdef17f9e5b8a48de2e6d1bb14db37ea8e2e"} Jan 29 13:41:03 crc kubenswrapper[4787]: I0129 13:41:03.533434 4787 scope.go:117] "RemoveContainer" containerID="e1e39ec84e4c856895c68934ffe37b579202f55f241d2985ef82c3428f5a54da" Jan 29 13:41:03 crc kubenswrapper[4787]: I0129 13:41:03.533520 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85444c4b89-hx4zl" Jan 29 13:41:03 crc kubenswrapper[4787]: I0129 13:41:03.574549 4787 scope.go:117] "RemoveContainer" containerID="4505f4e6e319771bc3829367ee02ff72fbb90f8adde43914841d18e31337e0bb" Jan 29 13:41:03 crc kubenswrapper[4787]: I0129 13:41:03.578253 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85444c4b89-hx4zl"] Jan 29 13:41:03 crc kubenswrapper[4787]: I0129 13:41:03.586030 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-85444c4b89-hx4zl"] Jan 29 13:41:03 crc kubenswrapper[4787]: E0129 13:41:03.673873 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:41:03 crc kubenswrapper[4787]: E0129 13:41:03.673976 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:41:03 crc kubenswrapper[4787]: E0129 13:41:03.675102 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:41:03 crc kubenswrapper[4787]: E0129 13:41:03.675287 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:41:03 crc kubenswrapper[4787]: E0129 13:41:03.676107 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:41:03 crc kubenswrapper[4787]: E0129 13:41:03.676210 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" Jan 29 13:41:03 crc kubenswrapper[4787]: E0129 13:41:03.676118 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:41:03 crc kubenswrapper[4787]: E0129 13:41:03.676298 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd" Jan 29 13:41:03 crc kubenswrapper[4787]: I0129 13:41:03.993775 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" path="/var/lib/kubelet/pods/8169c2f6-489e-43a8-ba7a-4f8abb9f1ced/volumes" Jan 29 13:41:06 crc kubenswrapper[4787]: E0129 13:41:06.517922 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 13:41:06 crc kubenswrapper[4787]: E0129 13:41:06.518382 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data podName:a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5 nodeName:}" failed. No retries permitted until 2026-01-29 13:41:22.518318557 +0000 UTC m=+1521.279578873 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data") pod "rabbitmq-server-0" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5") : configmap "rabbitmq-config-data" not found Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.672841 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.674289 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.674560 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.676524 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.676541 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking 
Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.672841 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.674289 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.674560 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.676524 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.676541 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.676575 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server"
Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.678316 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 29 13:41:08 crc kubenswrapper[4787]: E0129 13:41:08.678393 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd"
Jan 29 13:41:12 crc kubenswrapper[4787]: I0129 13:41:12.986299 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:41:12 crc kubenswrapper[4787]: E0129 13:41:12.986758 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:41:13 crc kubenswrapper[4787]: E0129 13:41:13.672997 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 29 13:41:13 crc kubenswrapper[4787]: E0129 13:41:13.673509 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 29 13:41:13 crc kubenswrapper[4787]: E0129 13:41:13.673888 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 29 13:41:13 crc kubenswrapper[4787]: E0129 13:41:13.673990 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server"
Jan 29 13:41:13 crc kubenswrapper[4787]: E0129 13:41:13.679022 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 29 13:41:13 crc kubenswrapper[4787]: E0129 13:41:13.681390 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 29 13:41:13 crc kubenswrapper[4787]: E0129 13:41:13.683267 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 29 13:41:13 crc kubenswrapper[4787]: E0129 13:41:13.683317 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd"
runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 29 13:41:18 crc kubenswrapper[4787]: E0129 13:41:18.674590 4787 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" Jan 29 13:41:18 crc kubenswrapper[4787]: E0129 13:41:18.679907 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:41:18 crc kubenswrapper[4787]: E0129 13:41:18.683212 4787 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 29 13:41:18 crc kubenswrapper[4787]: E0129 13:41:18.683322 4787 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-2xr6j" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd" Jan 29 13:41:20 crc kubenswrapper[4787]: I0129 13:41:20.729767 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2xr6j_213bfa86-f7a6-48b4-94a0-328352f00e75/ovs-vswitchd/0.log" Jan 29 13:41:20 crc kubenswrapper[4787]: I0129 13:41:20.731166 4787 generic.go:334] "Generic (PLEG): container finished" podID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" exitCode=137 Jan 29 13:41:20 crc kubenswrapper[4787]: I0129 13:41:20.731217 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2xr6j" event={"ID":"213bfa86-f7a6-48b4-94a0-328352f00e75","Type":"ContainerDied","Data":"58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce"} Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.294762 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2xr6j_213bfa86-f7a6-48b4-94a0-328352f00e75/ovs-vswitchd/0.log" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.296410 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379293 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-log\") pod \"213bfa86-f7a6-48b4-94a0-328352f00e75\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379343 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-lib\") pod \"213bfa86-f7a6-48b4-94a0-328352f00e75\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379390 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-etc-ovs\") pod \"213bfa86-f7a6-48b4-94a0-328352f00e75\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379403 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-run\") pod \"213bfa86-f7a6-48b4-94a0-328352f00e75\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379434 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn6ll\" (UniqueName: \"kubernetes.io/projected/213bfa86-f7a6-48b4-94a0-328352f00e75-kube-api-access-sn6ll\") pod \"213bfa86-f7a6-48b4-94a0-328352f00e75\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379466 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/213bfa86-f7a6-48b4-94a0-328352f00e75-scripts\") pod \"213bfa86-f7a6-48b4-94a0-328352f00e75\" (UID: \"213bfa86-f7a6-48b4-94a0-328352f00e75\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379610 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "213bfa86-f7a6-48b4-94a0-328352f00e75" (UID: "213bfa86-f7a6-48b4-94a0-328352f00e75"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379659 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-log" (OuterVolumeSpecName: "var-log") pod "213bfa86-f7a6-48b4-94a0-328352f00e75" (UID: "213bfa86-f7a6-48b4-94a0-328352f00e75"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379684 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-lib" (OuterVolumeSpecName: "var-lib") pod "213bfa86-f7a6-48b4-94a0-328352f00e75" (UID: "213bfa86-f7a6-48b4-94a0-328352f00e75"). InnerVolumeSpecName "var-lib". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379875 4787 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-log\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379893 4787 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-lib\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379905 4787 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-etc-ovs\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.379618 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-run" (OuterVolumeSpecName: "var-run") pod "213bfa86-f7a6-48b4-94a0-328352f00e75" (UID: "213bfa86-f7a6-48b4-94a0-328352f00e75"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.380751 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/213bfa86-f7a6-48b4-94a0-328352f00e75-scripts" (OuterVolumeSpecName: "scripts") pod "213bfa86-f7a6-48b4-94a0-328352f00e75" (UID: "213bfa86-f7a6-48b4-94a0-328352f00e75"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.386128 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/213bfa86-f7a6-48b4-94a0-328352f00e75-kube-api-access-sn6ll" (OuterVolumeSpecName: "kube-api-access-sn6ll") pod "213bfa86-f7a6-48b4-94a0-328352f00e75" (UID: "213bfa86-f7a6-48b4-94a0-328352f00e75"). InnerVolumeSpecName "kube-api-access-sn6ll". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.481576 4787 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/213bfa86-f7a6-48b4-94a0-328352f00e75-var-run\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.481606 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn6ll\" (UniqueName: \"kubernetes.io/projected/213bfa86-f7a6-48b4-94a0-328352f00e75-kube-api-access-sn6ll\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.481638 4787 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/213bfa86-f7a6-48b4-94a0-328352f00e75-scripts\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.604116 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.691438 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-combined-ca-bundle\") pod \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.691612 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.691696 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kxlb\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-kube-api-access-8kxlb\") pod \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.691768 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") pod \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.691805 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-cache\") pod \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.691891 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-lock\") pod \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\" (UID: \"8d475a95-10b2-46bb-a74a-e96b6bf70bfe\") " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.692607 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-cache" (OuterVolumeSpecName: "cache") pod "8d475a95-10b2-46bb-a74a-e96b6bf70bfe" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.692720 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-lock" (OuterVolumeSpecName: "lock") pod "8d475a95-10b2-46bb-a74a-e96b6bf70bfe" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe"). InnerVolumeSpecName "lock". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.693101 4787 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-cache\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.693126 4787 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-lock\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.694923 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "8d475a95-10b2-46bb-a74a-e96b6bf70bfe" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.696471 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-kube-api-access-8kxlb" (OuterVolumeSpecName: "kube-api-access-8kxlb") pod "8d475a95-10b2-46bb-a74a-e96b6bf70bfe" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe"). InnerVolumeSpecName "kube-api-access-8kxlb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.699644 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "swift") pod "8d475a95-10b2-46bb-a74a-e96b6bf70bfe" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.759251 4787 generic.go:334] "Generic (PLEG): container finished" podID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerID="187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67" exitCode=137 Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.759326 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67"} Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.759387 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"8d475a95-10b2-46bb-a74a-e96b6bf70bfe","Type":"ContainerDied","Data":"ed319c2cfaa1af9786cd58e89b63bdb61e854fda4b5e20a508f0011e00d59fd5"} Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.759405 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.759430 4787 scope.go:117] "RemoveContainer" containerID="187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.765977 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2xr6j_213bfa86-f7a6-48b4-94a0-328352f00e75/ovs-vswitchd/0.log" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.768809 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2xr6j" event={"ID":"213bfa86-f7a6-48b4-94a0-328352f00e75","Type":"ContainerDied","Data":"852487611cc4f8e0e2564c9407ef1ef49a62a00cf0a0904263eef3002495e54a"} Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.768897 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-2xr6j" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.794429 4787 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.794507 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.794531 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kxlb\" (UniqueName: \"kubernetes.io/projected/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-kube-api-access-8kxlb\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.812869 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.889786 4787 scope.go:117] "RemoveContainer" containerID="82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.892899 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-2xr6j"] Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.895952 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:21 crc kubenswrapper[4787]: E0129 13:41:21.897392 4787 kuberuntime_gc.go:389] "Failed to remove container log dead symlink" err="remove /var/log/containers/swift-storage-0_openstack_rsync-82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289.log: no such file or directory" path="/var/log/containers/swift-storage-0_openstack_rsync-82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289.log" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.902131 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-2xr6j"] Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.917765 4787 scope.go:117] "RemoveContainer" containerID="321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.948377 4787 scope.go:117] "RemoveContainer" containerID="7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 
13:41:21.970614 4787 scope.go:117] "RemoveContainer" containerID="d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.990379 4787 scope.go:117] "RemoveContainer" containerID="21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353" Jan 29 13:41:21 crc kubenswrapper[4787]: I0129 13:41:21.998354 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" path="/var/lib/kubelet/pods/213bfa86-f7a6-48b4-94a0-328352f00e75/volumes" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.027396 4787 scope.go:117] "RemoveContainer" containerID="06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.045307 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d475a95-10b2-46bb-a74a-e96b6bf70bfe" (UID: "8d475a95-10b2-46bb-a74a-e96b6bf70bfe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.063195 4787 scope.go:117] "RemoveContainer" containerID="cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.098412 4787 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d475a95-10b2-46bb-a74a-e96b6bf70bfe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.100637 4787 scope.go:117] "RemoveContainer" containerID="db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.105786 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.111097 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.121820 4787 scope.go:117] "RemoveContainer" containerID="3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.143389 4787 scope.go:117] "RemoveContainer" containerID="44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.166580 4787 scope.go:117] "RemoveContainer" containerID="edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.194220 4787 scope.go:117] "RemoveContainer" containerID="939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.220280 4787 scope.go:117] "RemoveContainer" containerID="db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.242345 4787 scope.go:117] "RemoveContainer" containerID="600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.267340 4787 scope.go:117] "RemoveContainer" containerID="187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.267997 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67\": container with ID starting with 187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67 not found: ID does not exist" containerID="187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.268108 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67"} err="failed to get container status \"187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67\": rpc error: code = NotFound desc = could not find container \"187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67\": container with ID starting with 187bcb70d7b50eb33ac9ef29071539a0b8ad87f88bef470c5fac2cac6a229e67 not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.268214 4787 scope.go:117] "RemoveContainer" containerID="82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.268761 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289\": container with ID starting with 82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289 not found: ID does not exist" containerID="82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.268780 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289"} err="failed to get container status \"82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289\": rpc error: code = NotFound desc = could not find container \"82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289\": container with ID starting with 82a735eb702862d4b13244ba2bd86954dd80645e5c2a2bfa114aafa1a530c289 not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.268793 4787 scope.go:117] "RemoveContainer" containerID="321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.269186 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b\": container with ID starting with 321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b not found: ID does not exist" containerID="321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.269263 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b"} err="failed to get container status \"321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b\": rpc error: code = NotFound desc = could not find container \"321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b\": container with ID starting with 321ab3586eee1eaee80acf39f898f34d434fb6fd732769f81e67275888068b1b not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.269334 4787 scope.go:117] "RemoveContainer" containerID="7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274" Jan 29 13:41:22 crc 
kubenswrapper[4787]: E0129 13:41:22.269962 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274\": container with ID starting with 7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274 not found: ID does not exist" containerID="7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.270022 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274"} err="failed to get container status \"7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274\": rpc error: code = NotFound desc = could not find container \"7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274\": container with ID starting with 7743ac81a2b53ed0d9dd404c382fb695dae7274c5e48be32c69ee9915c2c2274 not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.270068 4787 scope.go:117] "RemoveContainer" containerID="d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.270414 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b\": container with ID starting with d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b not found: ID does not exist" containerID="d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.270512 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b"} err="failed to get container status \"d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b\": rpc error: code = NotFound desc = could not find container \"d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b\": container with ID starting with d48a4a5871f0caed1628d43c75cd417ec3f122052733bc8bce48f221faa9de5b not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.270577 4787 scope.go:117] "RemoveContainer" containerID="21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.270963 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353\": container with ID starting with 21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353 not found: ID does not exist" containerID="21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.270981 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353"} err="failed to get container status \"21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353\": rpc error: code = NotFound desc = could not find container \"21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353\": container with ID starting with 21da3263f125b4ecfdc458ade31b722b1988e647f1fa4d56f38d3741c4b2e353 not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: 
I0129 13:41:22.270994 4787 scope.go:117] "RemoveContainer" containerID="06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.271276 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8\": container with ID starting with 06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8 not found: ID does not exist" containerID="06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.271351 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8"} err="failed to get container status \"06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8\": rpc error: code = NotFound desc = could not find container \"06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8\": container with ID starting with 06b2754bfbd71626c8929af23c273205ad439fdf6b7fbc04154f5a306570e7d8 not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.271439 4787 scope.go:117] "RemoveContainer" containerID="cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.271828 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf\": container with ID starting with cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf not found: ID does not exist" containerID="cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.271873 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf"} err="failed to get container status \"cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf\": rpc error: code = NotFound desc = could not find container \"cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf\": container with ID starting with cee3e9035abc90ef7cda420bbd9c3f9b7767045fd61ee4e61040a5fbabca86bf not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.271901 4787 scope.go:117] "RemoveContainer" containerID="db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.272258 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c\": container with ID starting with db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c not found: ID does not exist" containerID="db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.272331 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c"} err="failed to get container status \"db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c\": rpc error: code = NotFound desc = could not find container \"db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c\": container 
with ID starting with db7eddf60c281ce85cf35eec552fcd7d29f0e81abd162e2a5d43e01602fb4c6c not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.272397 4787 scope.go:117] "RemoveContainer" containerID="3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.272756 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197\": container with ID starting with 3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197 not found: ID does not exist" containerID="3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.272865 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197"} err="failed to get container status \"3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197\": rpc error: code = NotFound desc = could not find container \"3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197\": container with ID starting with 3d582ad2329ba7d53afc96fc09536c56cf3f5466851da7d0fdbf3702a75c2197 not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.272903 4787 scope.go:117] "RemoveContainer" containerID="44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.273420 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad\": container with ID starting with 44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad not found: ID does not exist" containerID="44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.273554 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad"} err="failed to get container status \"44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad\": rpc error: code = NotFound desc = could not find container \"44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad\": container with ID starting with 44b70a85773823f6903da9963541570d52aa81fa071968389021f533141835ad not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.273625 4787 scope.go:117] "RemoveContainer" containerID="edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.274051 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0\": container with ID starting with edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0 not found: ID does not exist" containerID="edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.274075 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0"} err="failed to get container status 
\"edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0\": rpc error: code = NotFound desc = could not find container \"edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0\": container with ID starting with edb8482923c237d8683b4aa0846b14c0ed9660a21d0e93066e575cbbb519f6a0 not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.274091 4787 scope.go:117] "RemoveContainer" containerID="939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.274354 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be\": container with ID starting with 939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be not found: ID does not exist" containerID="939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.274441 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be"} err="failed to get container status \"939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be\": rpc error: code = NotFound desc = could not find container \"939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be\": container with ID starting with 939122a0fba57f9497466af3525f06ee3e276a2484b6d8abbabba4c6bb23b2be not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.274540 4787 scope.go:117] "RemoveContainer" containerID="db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.274963 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4\": container with ID starting with db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4 not found: ID does not exist" containerID="db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.275029 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4"} err="failed to get container status \"db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4\": rpc error: code = NotFound desc = could not find container \"db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4\": container with ID starting with db89b9aa385ac625155d4c961e52c110cddc6090d2a32b44eff1e1c60a2ccab4 not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.275086 4787 scope.go:117] "RemoveContainer" containerID="600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.275559 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e\": container with ID starting with 600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e not found: ID does not exist" containerID="600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.275660 4787 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e"} err="failed to get container status \"600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e\": rpc error: code = NotFound desc = could not find container \"600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e\": container with ID starting with 600853ecf3bb88ea7ea19418932e16a55e534068ec850830a5f07fbf73bbc24e not found: ID does not exist" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.275799 4787 scope.go:117] "RemoveContainer" containerID="58c9b168b9d5d6572104a5dc66685336417324d4d3f2f6df3c21a8b9457c17ce" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.293184 4787 scope.go:117] "RemoveContainer" containerID="eb24d74378934fb185980daf404d02a2ead4a92b0ecd00ac8693cd17912c8f22" Jan 29 13:41:22 crc kubenswrapper[4787]: I0129 13:41:22.311356 4787 scope.go:117] "RemoveContainer" containerID="1233336481d0ba05ac8394b5e90213caf24b9a059da5f8a98c48bb5d1c70ff2b" Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.616249 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 13:41:22 crc kubenswrapper[4787]: E0129 13:41:22.616346 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data podName:a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5 nodeName:}" failed. No retries permitted until 2026-01-29 13:41:54.616322231 +0000 UTC m=+1553.377582517 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data") pod "rabbitmq-server-0" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5") : configmap "rabbitmq-config-data" not found Jan 29 13:41:24 crc kubenswrapper[4787]: I0129 13:41:24.008552 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" path="/var/lib/kubelet/pods/8d475a95-10b2-46bb-a74a-e96b6bf70bfe/volumes" Jan 29 13:41:27 crc kubenswrapper[4787]: I0129 13:41:27.985964 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:41:27 crc kubenswrapper[4787]: E0129 13:41:27.986690 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:41:38 crc kubenswrapper[4787]: I0129 13:41:38.986669 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:41:38 crc kubenswrapper[4787]: E0129 13:41:38.988096 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:41:51 crc kubenswrapper[4787]: I0129 13:41:51.988916 4787 scope.go:117] "RemoveContainer" 
containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:41:51 crc kubenswrapper[4787]: E0129 13:41:51.990722 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:41:54 crc kubenswrapper[4787]: E0129 13:41:54.641359 4787 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 29 13:41:54 crc kubenswrapper[4787]: E0129 13:41:54.641834 4787 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data podName:a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5 nodeName:}" failed. No retries permitted until 2026-01-29 13:42:58.641807996 +0000 UTC m=+1617.403068302 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data") pod "rabbitmq-server-0" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5") : configmap "rabbitmq-config-data" not found Jan 29 13:41:55 crc kubenswrapper[4787]: E0129 13:41:55.864259 4787 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Jan 29 13:41:55 crc kubenswrapper[4787]: command '/bin/bash -c if [ ! -z "$(cat /etc/pod-info/skipPreStopChecks)" ]; then exit 0; fi; rabbitmq-upgrade await_online_quorum_plus_one -t 604800 && rabbitmq-upgrade await_online_synchronized_mirror -t 604800 || true && rabbitmq-upgrade drain -t 604800' exited with 69: Error: unable to perform an operation on node 'rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'. Please see diagnostics information and suggestions below. Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Most common reasons for this are: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is unreachable (e.g. due to hostname resolution, TCP connection or firewall issues) Jan 29 13:41:55 crc kubenswrapper[4787]: * CLI tool fails to authenticate with the server (e.g. 
due to CLI tool's Erlang cookie not matching that of the server) Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is not running Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: In addition to the diagnostics info below: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * See the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more Jan 29 13:41:55 crc kubenswrapper[4787]: * Consult server logs on node rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack Jan 29 13:41:55 crc kubenswrapper[4787]: * If target node is configured to use long node names, don't forget to use --longnames with CLI tools Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: DIAGNOSTICS Jan 29 13:41:55 crc kubenswrapper[4787]: =========== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: attempted to contact: ['rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'] Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack: Jan 29 13:41:55 crc kubenswrapper[4787]: * unable to connect to epmd (port 4369) on rabbitmq-server-0.rabbitmq-nodes.openstack: nxdomain (non-existing domain) Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Current node details: Jan 29 13:41:55 crc kubenswrapper[4787]: * node name: 'rabbitmqcli-422-rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack' Jan 29 13:41:55 crc kubenswrapper[4787]: * effective user's home directory: /var/lib/rabbitmq Jan 29 13:41:55 crc kubenswrapper[4787]: * Erlang cookie hash: /DRtlR8TTFnsuPTnhY38vg== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Error: unable to perform an operation on node 'rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'. Please see diagnostics information and suggestions below. Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Most common reasons for this are: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is unreachable (e.g. due to hostname resolution, TCP connection or firewall issues) Jan 29 13:41:55 crc kubenswrapper[4787]: * CLI tool fails to authenticate with the server (e.g. 
due to CLI tool's Erlang cookie not matching that of the server) Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is not running Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: In addition to the diagnostics info below: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * See the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more Jan 29 13:41:55 crc kubenswrapper[4787]: * Consult server logs on node rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack Jan 29 13:41:55 crc kubenswrapper[4787]: * If target node is configured to use long node names, don't forget to use --longnames with CLI tools Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: DIAGNOSTICS Jan 29 13:41:55 crc kubenswrapper[4787]: =========== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: attempted to contact: ['rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'] Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack: Jan 29 13:41:55 crc kubenswrapper[4787]: * unable to connect to epmd (port 4369) on rabbitmq-server-0.rabbitmq-nodes.openstack: nxdomain (non-existing domain) Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Current node details: Jan 29 13:41:55 crc kubenswrapper[4787]: * node name: 'rabbitmqcli-749-rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack' Jan 29 13:41:55 crc kubenswrapper[4787]: * effective user's home directory: /var/lib/rabbitmq Jan 29 13:41:55 crc kubenswrapper[4787]: * Erlang cookie hash: /DRtlR8TTFnsuPTnhY38vg== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: > execCommand=["/bin/bash","-c","if [ ! -z \"$(cat /etc/pod-info/skipPreStopChecks)\" ]; then exit 0; fi; rabbitmq-upgrade await_online_quorum_plus_one -t 604800 \u0026\u0026 rabbitmq-upgrade await_online_synchronized_mirror -t 604800 || true \u0026\u0026 rabbitmq-upgrade drain -t 604800"] containerName="rabbitmq" pod="openstack/rabbitmq-server-0" message=< Jan 29 13:41:55 crc kubenswrapper[4787]: Will put node rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack into maintenance mode. The node will no longer serve any client traffic! Jan 29 13:41:55 crc kubenswrapper[4787]: Error: unable to perform an operation on node 'rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'. Please see diagnostics information and suggestions below. Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Most common reasons for this are: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is unreachable (e.g. due to hostname resolution, TCP connection or firewall issues) Jan 29 13:41:55 crc kubenswrapper[4787]: * CLI tool fails to authenticate with the server (e.g. 
due to CLI tool's Erlang cookie not matching that of the server) Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is not running Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: In addition to the diagnostics info below: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * See the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more Jan 29 13:41:55 crc kubenswrapper[4787]: * Consult server logs on node rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack Jan 29 13:41:55 crc kubenswrapper[4787]: * If target node is configured to use long node names, don't forget to use --longnames with CLI tools Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: DIAGNOSTICS Jan 29 13:41:55 crc kubenswrapper[4787]: =========== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: attempted to contact: ['rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'] Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack: Jan 29 13:41:55 crc kubenswrapper[4787]: * unable to connect to epmd (port 4369) on rabbitmq-server-0.rabbitmq-nodes.openstack: nxdomain (non-existing domain) Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Current node details: Jan 29 13:41:55 crc kubenswrapper[4787]: * node name: 'rabbitmqcli-422-rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack' Jan 29 13:41:55 crc kubenswrapper[4787]: * effective user's home directory: /var/lib/rabbitmq Jan 29 13:41:55 crc kubenswrapper[4787]: * Erlang cookie hash: /DRtlR8TTFnsuPTnhY38vg== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Error: unable to perform an operation on node 'rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'. Please see diagnostics information and suggestions below. Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Most common reasons for this are: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is unreachable (e.g. due to hostname resolution, TCP connection or firewall issues) Jan 29 13:41:55 crc kubenswrapper[4787]: * CLI tool fails to authenticate with the server (e.g. 
due to CLI tool's Erlang cookie not matching that of the server) Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is not running Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: In addition to the diagnostics info below: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * See the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more Jan 29 13:41:55 crc kubenswrapper[4787]: * Consult server logs on node rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack Jan 29 13:41:55 crc kubenswrapper[4787]: * If target node is configured to use long node names, don't forget to use --longnames with CLI tools Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: DIAGNOSTICS Jan 29 13:41:55 crc kubenswrapper[4787]: =========== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: attempted to contact: ['rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'] Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack: Jan 29 13:41:55 crc kubenswrapper[4787]: * unable to connect to epmd (port 4369) on rabbitmq-server-0.rabbitmq-nodes.openstack: nxdomain (non-existing domain) Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Current node details: Jan 29 13:41:55 crc kubenswrapper[4787]: * node name: 'rabbitmqcli-749-rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack' Jan 29 13:41:55 crc kubenswrapper[4787]: * effective user's home directory: /var/lib/rabbitmq Jan 29 13:41:55 crc kubenswrapper[4787]: * Erlang cookie hash: /DRtlR8TTFnsuPTnhY38vg== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: > Jan 29 13:41:55 crc kubenswrapper[4787]: E0129 13:41:55.864317 4787 kuberuntime_container.go:691] "PreStop hook failed" err=< Jan 29 13:41:55 crc kubenswrapper[4787]: command '/bin/bash -c if [ ! -z "$(cat /etc/pod-info/skipPreStopChecks)" ]; then exit 0; fi; rabbitmq-upgrade await_online_quorum_plus_one -t 604800 && rabbitmq-upgrade await_online_synchronized_mirror -t 604800 || true && rabbitmq-upgrade drain -t 604800' exited with 69: Error: unable to perform an operation on node 'rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'. Please see diagnostics information and suggestions below. Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Most common reasons for this are: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is unreachable (e.g. due to hostname resolution, TCP connection or firewall issues) Jan 29 13:41:55 crc kubenswrapper[4787]: * CLI tool fails to authenticate with the server (e.g. 
due to CLI tool's Erlang cookie not matching that of the server) Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is not running Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: In addition to the diagnostics info below: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * See the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more Jan 29 13:41:55 crc kubenswrapper[4787]: * Consult server logs on node rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack Jan 29 13:41:55 crc kubenswrapper[4787]: * If target node is configured to use long node names, don't forget to use --longnames with CLI tools Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: DIAGNOSTICS Jan 29 13:41:55 crc kubenswrapper[4787]: =========== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: attempted to contact: ['rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'] Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack: Jan 29 13:41:55 crc kubenswrapper[4787]: * unable to connect to epmd (port 4369) on rabbitmq-server-0.rabbitmq-nodes.openstack: nxdomain (non-existing domain) Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Current node details: Jan 29 13:41:55 crc kubenswrapper[4787]: * node name: 'rabbitmqcli-422-rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack' Jan 29 13:41:55 crc kubenswrapper[4787]: * effective user's home directory: /var/lib/rabbitmq Jan 29 13:41:55 crc kubenswrapper[4787]: * Erlang cookie hash: /DRtlR8TTFnsuPTnhY38vg== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Error: unable to perform an operation on node 'rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'. Please see diagnostics information and suggestions below. Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Most common reasons for this are: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is unreachable (e.g. due to hostname resolution, TCP connection or firewall issues) Jan 29 13:41:55 crc kubenswrapper[4787]: * CLI tool fails to authenticate with the server (e.g. 
due to CLI tool's Erlang cookie not matching that of the server) Jan 29 13:41:55 crc kubenswrapper[4787]: * Target node is not running Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: In addition to the diagnostics info below: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: * See the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more Jan 29 13:41:55 crc kubenswrapper[4787]: * Consult server logs on node rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack Jan 29 13:41:55 crc kubenswrapper[4787]: * If target node is configured to use long node names, don't forget to use --longnames with CLI tools Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: DIAGNOSTICS Jan 29 13:41:55 crc kubenswrapper[4787]: =========== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: attempted to contact: ['rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack'] Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack: Jan 29 13:41:55 crc kubenswrapper[4787]: * unable to connect to epmd (port 4369) on rabbitmq-server-0.rabbitmq-nodes.openstack: nxdomain (non-existing domain) Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: Current node details: Jan 29 13:41:55 crc kubenswrapper[4787]: * node name: 'rabbitmqcli-749-rabbit@rabbitmq-server-0.rabbitmq-nodes.openstack' Jan 29 13:41:55 crc kubenswrapper[4787]: * effective user's home directory: /var/lib/rabbitmq Jan 29 13:41:55 crc kubenswrapper[4787]: * Erlang cookie hash: /DRtlR8TTFnsuPTnhY38vg== Jan 29 13:41:55 crc kubenswrapper[4787]: Jan 29 13:41:55 crc kubenswrapper[4787]: > pod="openstack/rabbitmq-server-0" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerName="rabbitmq" containerID="cri-o://e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d" Jan 29 13:41:55 crc kubenswrapper[4787]: I0129 13:41:55.864361 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerName="rabbitmq" containerID="cri-o://e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d" gracePeriod=604738 Jan 29 13:41:59 crc kubenswrapper[4787]: I0129 13:41:59.977521 4787 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.584564 4787 util.go:48] "No ready sandbox for pod can be found. 
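One detail worth noting in the "Killing container with a grace period" entry above: gracePeriod=604738 is the grace period left after the failing preStop hook ran, i.e. the pod's total termination grace period minus the hook's runtime. The total of 604800s (7 days) is an assumption inferred from the -t 604800 timeouts in the hook itself; a back-of-envelope check in Go:

    // Assumption: terminationGracePeriodSeconds=604800, matching the
    // -t 604800 flags in the preStop command; ~62s is the approximate
    // time the hook consumed before kubelet moved on to SIGTERM.
    package main

    import "fmt"

    func main() {
        const totalGraceSeconds = 604800 // assumed pod terminationGracePeriodSeconds
        const preStopSeconds = 62        // approx. elapsed preStop hook runtime
        fmt.Println(totalGraceSeconds - preStopSeconds) // 604738, as logged
    }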
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.584564 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.659491 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-plugins-conf\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.659533 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2c8lf\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-kube-api-access-2c8lf\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.659575 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-confd\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.659601 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-tls\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.659675 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-server-conf\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.659706 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-plugins\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.659747 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.659810 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.660393 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-pod-info\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.660441 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-erlang-cookie\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.660502 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-erlang-cookie-secret\") pod \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\" (UID: \"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5\") "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.660145 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.660235 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.661941 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.665346 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.665533 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.665647 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-pod-info" (OuterVolumeSpecName: "pod-info") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.665982 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-kube-api-access-2c8lf" (OuterVolumeSpecName: "kube-api-access-2c8lf") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "kube-api-access-2c8lf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.667021 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.684858 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data" (OuterVolumeSpecName: "config-data") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.698963 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-server-conf" (OuterVolumeSpecName: "server-conf") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.726346 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" (UID: "a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762188 4787 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-pod-info\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762222 4787 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762232 4787 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762242 4787 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762250 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2c8lf\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-kube-api-access-2c8lf\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762259 4787 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762267 4787 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762275 4787 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-server-conf\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762282 4787 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762317 4787 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" "
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.762326 4787 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5-config-data\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.775585 4787 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc"
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.863775 4787 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\""
Jan 29 13:42:02 crc kubenswrapper[4787]: I0129 13:42:02.986209 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:42:02 crc kubenswrapper[4787]: E0129 13:42:02.986996 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.203017 4787 generic.go:334] "Generic (PLEG): container finished" podID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerID="e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d" exitCode=0
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.203084 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.203099 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5","Type":"ContainerDied","Data":"e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d"}
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.203175 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5","Type":"ContainerDied","Data":"d3be32187af9f191f0daadc8b03aa3cf2809d35ae4ae9654f8696fe9eb1589dd"}
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.203212 4787 scope.go:117] "RemoveContainer" containerID="e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d"
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.256857 4787 scope.go:117] "RemoveContainer" containerID="fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413"
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.283577 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.290954 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.298606 4787 scope.go:117] "RemoveContainer" containerID="e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d"
Jan 29 13:42:03 crc kubenswrapper[4787]: E0129 13:42:03.299273 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d\": container with ID starting with e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d not found: ID does not exist" containerID="e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d"
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.299341 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d"} err="failed to get container status \"e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d\": rpc error: code = NotFound desc = could not find container \"e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d\": container with ID starting with e5432e0e0e4b01a5fa261a2c2ca8eb171562ffe9546078f95c518dea66bd3e5d not found: ID does not exist"
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.299380 4787 scope.go:117] "RemoveContainer" containerID="fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413"
Jan 29 13:42:03 crc kubenswrapper[4787]: E0129 13:42:03.301124 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413\": container with ID starting with fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413 not found: ID does not exist" containerID="fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413"
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.301180 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413"} err="failed to get container status \"fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413\": rpc error: code = NotFound desc = could not find container \"fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413\": container with ID starting with fb32bada6e5c6dc92233e7364a42cefe3b6be7686dc0400c63a06f9fd39bb413 not found: ID does not exist"
Jan 29 13:42:03 crc kubenswrapper[4787]: I0129 13:42:03.999427 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" path="/var/lib/kubelet/pods/a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5/volumes"
Jan 29 13:42:17 crc kubenswrapper[4787]: I0129 13:42:17.985510 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:42:17 crc kubenswrapper[4787]: E0129 13:42:17.986195 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:42:21 crc kubenswrapper[4787]: I0129 13:42:21.900503 4787 scope.go:117] "RemoveContainer" containerID="dfb03cfdd6c75dcc1d308c835ace9090bb9e4ec6b5eeac917065641d760e1a38"
Jan 29 13:42:21 crc kubenswrapper[4787]: I0129 13:42:21.921835 4787 scope.go:117] "RemoveContainer" containerID="ba5db01e0916c4c88d6a2536d7cef2d0569d87e7483b40333d2ad311475774dd"
Jan 29 13:42:21 crc kubenswrapper[4787]: I0129 13:42:21.972249 4787 scope.go:117] "RemoveContainer" containerID="c3fa4b959b699ba45eacce41c7659658e2e9f1bdb045347aa100e24db697944e"
Jan 29 13:42:21 crc kubenswrapper[4787]: I0129 13:42:21.987697 4787 scope.go:117] "RemoveContainer" containerID="847559ad6f873e60f8c0719c850857931fb348773e273e63f65fe30b1ec026f0"
Jan 29 13:42:22 crc kubenswrapper[4787]: I0129 13:42:22.022042 4787 scope.go:117] "RemoveContainer" containerID="7de70d403eec1d4bc647f68435fe451159d0ea3927072482318db17e1345560d"
Jan 29 13:42:22 crc kubenswrapper[4787]: I0129 13:42:22.042656 4787 scope.go:117] "RemoveContainer" containerID="392454853d694c87aee62f7a61f6214cfe1b02f5a83cef2c0a2fb8b1edcea033"
Jan 29 13:42:22 crc kubenswrapper[4787]: I0129 13:42:22.069177 4787 scope.go:117] "RemoveContainer" containerID="468de5bd92c2ecc0ab49c8f14f45a5dcafa2c582c73d2fb214ca85a44e720357"
Jan 29 13:42:22 crc kubenswrapper[4787]: I0129 13:42:22.085132 4787 scope.go:117] "RemoveContainer" containerID="393993cd1c1f3ad4ab9f870f012e6c9668994eba30a91b7a9ee18082dd2d2fe0"
Jan 29 13:42:22 crc kubenswrapper[4787]: I0129 13:42:22.099020 4787 scope.go:117] "RemoveContainer" containerID="40a5f44d8467a9435485b29a34515f70ad41f84edfb257cc8246ef8156bb7e1c"
Jan 29 13:42:22 crc kubenswrapper[4787]: I0129 13:42:22.113501 4787 scope.go:117] "RemoveContainer" containerID="82d697dced0239e8241f4d84cecf64d41679b7b1323a0e0ba8d9f1e602edbb31"
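The machine-config-daemon pair that recurs through this window (13:41:27, 13:41:38, 13:41:51, 13:42:02, 13:42:17, and again below at 13:42:32 and 13:42:43) shows the kubelet re-evaluating a crash-looping container on each pod sync while the restart back-off is in effect. Only the 5m cap is stated in the log ("back-off 5m0s"); the sketch below assumes a 10s initial delay and a doubling factor, which is illustrative rather than kubelet source.

    // Sketch of a capped doubling restart back-off; the 5m cap comes from
    // the log, the 10s initial delay and factor of 2 are assumptions.
    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 10 * time.Second
        maxDelay := 5 * time.Minute
        for i := 0; i < 7; i++ {
            fmt.Println(delay) // 10s 20s 40s 1m20s 2m40s 5m0s 5m0s
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }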
pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:42:43 crc kubenswrapper[4787]: I0129 13:42:43.985814 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:42:43 crc kubenswrapper[4787]: E0129 13:42:43.987049 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.077826 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wwf27"] Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.078767 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" containerName="rabbitmq" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.078792 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" containerName="rabbitmq" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.078817 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerName="glance-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.078831 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerName="glance-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.078859 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="sg-core" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.078872 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="sg-core" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.078887 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerName="glance-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.078902 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerName="glance-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.078929 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerName="rabbitmq" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.078941 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerName="rabbitmq" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.078966 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerName="barbican-worker-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.078980 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerName="barbican-worker-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079001 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f73803d0-ec9b-4483-a509-7bff9afb1d85" containerName="nova-cell1-conductor-conductor" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 
13:42:45.079014 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="f73803d0-ec9b-4483-a509-7bff9afb1d85" containerName="nova-cell1-conductor-conductor" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079031 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerName="neutron-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079044 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerName="neutron-api" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079067 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerName="barbican-worker" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079082 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerName="barbican-worker" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079108 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d1018e7-6cf6-4c3e-b351-6249e795620d" containerName="keystone-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079121 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d1018e7-6cf6-4c3e-b351-6249e795620d" containerName="keystone-api" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079139 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" containerName="mysql-bootstrap" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079151 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" containerName="mysql-bootstrap" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079173 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079186 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079207 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerName="neutron-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079222 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerName="neutron-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079247 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" containerName="placement-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079259 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" containerName="placement-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079278 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerName="cinder-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079291 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerName="cinder-api" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079311 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-server" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 
13:42:45.079324 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-server" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079350 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" containerName="setup-container" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079363 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" containerName="setup-container" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079382 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-metadata" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079395 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-metadata" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079444 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server-init" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079479 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server-init" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079498 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-reaper" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079511 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-reaper" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079537 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079550 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079564 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079577 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079593 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerName="setup-container" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079605 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerName="setup-container" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079630 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="openstack-network-exporter" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079642 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="openstack-network-exporter" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079664 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" 
containerName="glance-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079678 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerName="glance-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079699 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079712 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079736 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079749 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079771 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="swift-recon-cron" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079784 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="swift-recon-cron" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079949 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" containerName="ovn-controller" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079961 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" containerName="ovn-controller" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.079978 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-server" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.079990 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-server" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080025 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" containerName="galera" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080038 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" containerName="galera" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080054 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080067 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080088 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080101 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080116 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" 
containerName="object-server" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080129 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-server" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080144 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerName="barbican-keystone-listener" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080156 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerName="barbican-keystone-listener" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080178 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080191 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080209 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080221 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080244 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080256 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-api" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080271 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" containerName="placement-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080284 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" containerName="placement-api" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080306 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="proxy-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080318 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="proxy-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080339 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="rsync" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080352 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="rsync" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080368 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080381 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080396 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" 
containerName="cinder-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080409 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerName="cinder-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080426 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-updater" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080439 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-updater" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080492 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-expirer" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080506 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-expirer" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080525 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerName="glance-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080538 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerName="glance-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080555 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080568 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080587 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerName="barbican-keystone-listener-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080601 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerName="barbican-keystone-listener-log" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080623 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" containerName="kube-state-metrics" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080635 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" containerName="kube-state-metrics" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080656 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56183615-9f6d-4fc8-8ff9-4856929e5d28" containerName="nova-scheduler-scheduler" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080669 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="56183615-9f6d-4fc8-8ff9-4856929e5d28" containerName="nova-scheduler-scheduler" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080688 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080701 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080724 4787 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="ceilometer-central-agent" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080737 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="ceilometer-central-agent" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080754 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="ceilometer-notification-agent" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080768 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="ceilometer-notification-agent" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080788 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d95df36d-a737-4136-8921-01fe4e028add" containerName="memcached" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080801 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d95df36d-a737-4136-8921-01fe4e028add" containerName="memcached" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080826 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-updater" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080840 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-updater" Jan 29 13:42:45 crc kubenswrapper[4787]: E0129 13:42:45.080854 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="ovn-northd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.080866 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="ovn-northd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081120 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d21a5fb3-2d4b-4b53-8fe6-45fe636362b4" containerName="kube-state-metrics" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081142 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-updater" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081169 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="rsync" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081192 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="swift-recon-cron" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081212 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-expirer" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081236 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerName="cinder-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081262 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="56183615-9f6d-4fc8-8ff9-4856929e5d28" containerName="nova-scheduler-scheduler" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081284 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" containerName="placement-log" Jan 29 
13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081299 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081314 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d1018e7-6cf6-4c3e-b351-6249e795620d" containerName="keystone-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081329 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="proxy-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081352 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081369 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerName="barbican-keystone-listener" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081385 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-server" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081399 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerName="glance-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081416 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-server" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081441 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d04fd2-43ca-483e-9ca8-4ba67fcf69f5" containerName="rabbitmq" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081483 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081501 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="67675e2f-3a2b-4552-bbd5-c12b3ba3a505" containerName="glance-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081514 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerName="glance-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081530 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="6285155e-2d1b-4c6f-be33-5f2681a7b5e0" containerName="rabbitmq" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081547 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081564 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081589 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="openstack-network-exporter" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081603 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-updater" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081619 4787 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerName="barbican-worker-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081634 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f204ba0-4972-4e50-9c21-e9639ef73ff3" containerName="barbican-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081654 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="sg-core" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081669 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovs-vswitchd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081683 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d95df36d-a737-4136-8921-01fe4e028add" containerName="memcached" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081702 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081721 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="93f58b7a-13c3-49ef-8c78-a5931438cba6" containerName="nova-metadata-metadata" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081741 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b3f405a-2fa1-4afe-8364-60489fc271ca" containerName="galera" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081761 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="213bfa86-f7a6-48b4-94a0-328352f00e75" containerName="ovsdb-server" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081779 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081793 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdeb3ae9-0105-40e4-889d-7d9ab0be4427" containerName="ovn-northd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081812 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b19d6d2-57d4-4c0b-aa0d-7184ea42da0a" containerName="glance-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081834 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9df2172-145d-4edd-8d1c-7cc6768840bb" containerName="nova-api-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081855 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="00f5493b-e570-4684-b7ae-9af7154b3e51" containerName="barbican-keystone-listener-log" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081870 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="container-server" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081886 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="ceilometer-notification-agent" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081909 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="f73803d0-ec9b-4483-a509-7bff9afb1d85" containerName="nova-cell1-conductor-conductor" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081933 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="383ed8f7-22dd-49b6-a932-6425cc62a6d1" 
containerName="ovn-controller" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081952 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="1287d5ec-d072-43ba-b553-6d2d229b7c6c" containerName="cinder-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081967 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="c27d0b15-3660-4d2c-b5f1-89392d93317f" containerName="ceilometer-central-agent" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081981 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-auditor" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.081997 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerName="neutron-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.082018 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8169c2f6-489e-43a8-ba7a-4f8abb9f1ced" containerName="neutron-httpd" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.082032 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="account-reaper" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.082048 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e48c5bf-c285-446e-a91e-fe216f819f05" containerName="barbican-worker" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.082067 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d475a95-10b2-46bb-a74a-e96b6bf70bfe" containerName="object-replicator" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.082085 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="87eff82d-823f-44a9-b96b-fed35701c54b" containerName="placement-api" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.083837 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.101097 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwf27"] Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.227297 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl9vn\" (UniqueName: \"kubernetes.io/projected/e5ab416f-744b-426e-a900-695911fafdb2-kube-api-access-bl9vn\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.227475 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-utilities\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.227613 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-catalog-content\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.329402 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-utilities\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.329529 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-catalog-content\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.329625 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl9vn\" (UniqueName: \"kubernetes.io/projected/e5ab416f-744b-426e-a900-695911fafdb2-kube-api-access-bl9vn\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.329957 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-utilities\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.330126 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-catalog-content\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.349502 4787 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bl9vn\" (UniqueName: \"kubernetes.io/projected/e5ab416f-744b-426e-a900-695911fafdb2-kube-api-access-bl9vn\") pod \"redhat-marketplace-wwf27\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.413830 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:45 crc kubenswrapper[4787]: I0129 13:42:45.876900 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwf27"] Jan 29 13:42:46 crc kubenswrapper[4787]: I0129 13:42:46.633048 4787 generic.go:334] "Generic (PLEG): container finished" podID="e5ab416f-744b-426e-a900-695911fafdb2" containerID="ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab" exitCode=0 Jan 29 13:42:46 crc kubenswrapper[4787]: I0129 13:42:46.633134 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwf27" event={"ID":"e5ab416f-744b-426e-a900-695911fafdb2","Type":"ContainerDied","Data":"ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab"} Jan 29 13:42:46 crc kubenswrapper[4787]: I0129 13:42:46.633371 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwf27" event={"ID":"e5ab416f-744b-426e-a900-695911fafdb2","Type":"ContainerStarted","Data":"9efc6d57312ce3be340fd12bb5ee9622c9b05eb96ae6f699608c7b87aedb0efd"} Jan 29 13:42:48 crc kubenswrapper[4787]: I0129 13:42:48.665186 4787 generic.go:334] "Generic (PLEG): container finished" podID="e5ab416f-744b-426e-a900-695911fafdb2" containerID="59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32" exitCode=0 Jan 29 13:42:48 crc kubenswrapper[4787]: I0129 13:42:48.665328 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwf27" event={"ID":"e5ab416f-744b-426e-a900-695911fafdb2","Type":"ContainerDied","Data":"59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32"} Jan 29 13:42:49 crc kubenswrapper[4787]: I0129 13:42:49.678281 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwf27" event={"ID":"e5ab416f-744b-426e-a900-695911fafdb2","Type":"ContainerStarted","Data":"03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65"} Jan 29 13:42:55 crc kubenswrapper[4787]: I0129 13:42:55.414074 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:55 crc kubenswrapper[4787]: I0129 13:42:55.414520 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:55 crc kubenswrapper[4787]: I0129 13:42:55.480734 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:55 crc kubenswrapper[4787]: I0129 13:42:55.511443 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wwf27" podStartSLOduration=7.966748545 podStartE2EDuration="10.511421099s" podCreationTimestamp="2026-01-29 13:42:45 +0000 UTC" firstStartedPulling="2026-01-29 13:42:46.635845709 +0000 UTC m=+1605.397105995" lastFinishedPulling="2026-01-29 13:42:49.180518263 +0000 UTC m=+1607.941778549" observedRunningTime="2026-01-29 13:42:49.703925494 +0000 UTC 
m=+1608.465185780" watchObservedRunningTime="2026-01-29 13:42:55.511421099 +0000 UTC m=+1614.272681385" Jan 29 13:42:55 crc kubenswrapper[4787]: I0129 13:42:55.780438 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:55 crc kubenswrapper[4787]: I0129 13:42:55.825162 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwf27"] Jan 29 13:42:55 crc kubenswrapper[4787]: I0129 13:42:55.985347 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:42:55 crc kubenswrapper[4787]: E0129 13:42:55.985588 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:42:57 crc kubenswrapper[4787]: I0129 13:42:57.759296 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wwf27" podUID="e5ab416f-744b-426e-a900-695911fafdb2" containerName="registry-server" containerID="cri-o://03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65" gracePeriod=2 Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.217446 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.321639 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-catalog-content\") pod \"e5ab416f-744b-426e-a900-695911fafdb2\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.321734 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-utilities\") pod \"e5ab416f-744b-426e-a900-695911fafdb2\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.321807 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl9vn\" (UniqueName: \"kubernetes.io/projected/e5ab416f-744b-426e-a900-695911fafdb2-kube-api-access-bl9vn\") pod \"e5ab416f-744b-426e-a900-695911fafdb2\" (UID: \"e5ab416f-744b-426e-a900-695911fafdb2\") " Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.323775 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-utilities" (OuterVolumeSpecName: "utilities") pod "e5ab416f-744b-426e-a900-695911fafdb2" (UID: "e5ab416f-744b-426e-a900-695911fafdb2"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.332612 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5ab416f-744b-426e-a900-695911fafdb2-kube-api-access-bl9vn" (OuterVolumeSpecName: "kube-api-access-bl9vn") pod "e5ab416f-744b-426e-a900-695911fafdb2" (UID: "e5ab416f-744b-426e-a900-695911fafdb2"). InnerVolumeSpecName "kube-api-access-bl9vn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.346780 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5ab416f-744b-426e-a900-695911fafdb2" (UID: "e5ab416f-744b-426e-a900-695911fafdb2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.423967 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl9vn\" (UniqueName: \"kubernetes.io/projected/e5ab416f-744b-426e-a900-695911fafdb2-kube-api-access-bl9vn\") on node \"crc\" DevicePath \"\"" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.424009 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.424027 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5ab416f-744b-426e-a900-695911fafdb2-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.770337 4787 generic.go:334] "Generic (PLEG): container finished" podID="e5ab416f-744b-426e-a900-695911fafdb2" containerID="03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65" exitCode=0 Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.770397 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwf27" event={"ID":"e5ab416f-744b-426e-a900-695911fafdb2","Type":"ContainerDied","Data":"03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65"} Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.770436 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wwf27" event={"ID":"e5ab416f-744b-426e-a900-695911fafdb2","Type":"ContainerDied","Data":"9efc6d57312ce3be340fd12bb5ee9622c9b05eb96ae6f699608c7b87aedb0efd"} Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.770485 4787 scope.go:117] "RemoveContainer" containerID="03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.770650 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wwf27" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.803488 4787 scope.go:117] "RemoveContainer" containerID="59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.809355 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwf27"] Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.818226 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wwf27"] Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.835770 4787 scope.go:117] "RemoveContainer" containerID="ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.859728 4787 scope.go:117] "RemoveContainer" containerID="03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65" Jan 29 13:42:58 crc kubenswrapper[4787]: E0129 13:42:58.860439 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65\": container with ID starting with 03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65 not found: ID does not exist" containerID="03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.860494 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65"} err="failed to get container status \"03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65\": rpc error: code = NotFound desc = could not find container \"03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65\": container with ID starting with 03b1085e45d4a91639c62f442d361d1b62ec5c7030af2f7192056663ca109b65 not found: ID does not exist" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.860521 4787 scope.go:117] "RemoveContainer" containerID="59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32" Jan 29 13:42:58 crc kubenswrapper[4787]: E0129 13:42:58.861063 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32\": container with ID starting with 59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32 not found: ID does not exist" containerID="59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.861094 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32"} err="failed to get container status \"59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32\": rpc error: code = NotFound desc = could not find container \"59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32\": container with ID starting with 59ea5f5763643e6a2e0630b72de3c4fbc0f33c118e9e700f4c3fc34de5670a32 not found: ID does not exist" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.861114 4787 scope.go:117] "RemoveContainer" containerID="ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab" Jan 29 13:42:58 crc kubenswrapper[4787]: E0129 13:42:58.861494 4787 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab\": container with ID starting with ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab not found: ID does not exist" containerID="ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab" Jan 29 13:42:58 crc kubenswrapper[4787]: I0129 13:42:58.861514 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab"} err="failed to get container status \"ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab\": rpc error: code = NotFound desc = could not find container \"ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab\": container with ID starting with ebfeee8aca4e40b3116c293fbc034db195c9cea7fed0e2149a527c2224bdc9ab not found: ID does not exist" Jan 29 13:42:59 crc kubenswrapper[4787]: I0129 13:42:59.998022 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5ab416f-744b-426e-a900-695911fafdb2" path="/var/lib/kubelet/pods/e5ab416f-744b-426e-a900-695911fafdb2/volumes" Jan 29 13:43:06 crc kubenswrapper[4787]: I0129 13:43:06.985793 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:43:06 crc kubenswrapper[4787]: E0129 13:43:06.986257 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:43:20 crc kubenswrapper[4787]: I0129 13:43:20.986593 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:43:20 crc kubenswrapper[4787]: E0129 13:43:20.987971 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.266220 4787 scope.go:117] "RemoveContainer" containerID="3037de86958a058bff127ccea5ba7107871ceffc84b77ab0c6edad39caa969c6" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.301251 4787 scope.go:117] "RemoveContainer" containerID="515a9ad605bcfe4d86f0114f11b93eddf063a9c6ad24638ac06c33b9c01f592e" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.369785 4787 scope.go:117] "RemoveContainer" containerID="8ce05c87ef36708b8a80fbb2157bdd70fc766fc823b0f2bab968990b9bb3adb4" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.427740 4787 scope.go:117] "RemoveContainer" containerID="785c1e7301248da493293904a73624de786cfb2556a39349b67654eabdef4cce" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.464752 4787 scope.go:117] "RemoveContainer" containerID="e1b6628372053d9ce300b29e161a3428b69bea52b970a5b5a24b898c530045d1" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.496043 4787 scope.go:117] "RemoveContainer" 
containerID="9a3e281e4dd5be1e9e0bcf9ef19644f02ecd2eb343bc9fdd17548cccefe639cd" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.514059 4787 scope.go:117] "RemoveContainer" containerID="725d17891c6e69acd5172d8d256ef2ce608f642076839459bd1443935ee559be" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.542680 4787 scope.go:117] "RemoveContainer" containerID="8f10ec30349c15378a4bec9d811f8d1bc66a9162353f7017ea7de25fe5af7928" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.570935 4787 scope.go:117] "RemoveContainer" containerID="23f1b5e0219271a3b5ec4767dfc35d3f262582364b3cf8063f3e6a39fa559f84" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.595556 4787 scope.go:117] "RemoveContainer" containerID="736791f07d105c1f37aa90173faebd8d5e29477a8d86b9bfda3cfa33aab306fa" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.632554 4787 scope.go:117] "RemoveContainer" containerID="724e336ca6bd3ec15b6bd66148b9cbec2928b11b984c9a6bb592e88e19b45827" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.650977 4787 scope.go:117] "RemoveContainer" containerID="72b2ec0c4cd5c043ceebdfa345faa2e0f31d571e8b19d5ab485f4d1a72bf4bc4" Jan 29 13:43:22 crc kubenswrapper[4787]: I0129 13:43:22.673399 4787 scope.go:117] "RemoveContainer" containerID="c85f5b8912965f59076414fe6d1f7dcf416f22854bbc47dfb3585b9813f486d6" Jan 29 13:43:33 crc kubenswrapper[4787]: I0129 13:43:33.985862 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:43:33 crc kubenswrapper[4787]: E0129 13:43:33.986588 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:43:46 crc kubenswrapper[4787]: I0129 13:43:46.985918 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:43:46 crc kubenswrapper[4787]: E0129 13:43:46.986356 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:43:57 crc kubenswrapper[4787]: I0129 13:43:57.985829 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:43:57 crc kubenswrapper[4787]: E0129 13:43:57.986624 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:44:09 crc kubenswrapper[4787]: I0129 13:44:09.986119 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:44:09 crc 
kubenswrapper[4787]: E0129 13:44:09.987043 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:44:23 crc kubenswrapper[4787]: I0129 13:44:23.022342 4787 scope.go:117] "RemoveContainer" containerID="a9a6511c15870cb7985525c82b23681be5038d9f50adfd768e90990683aa7089" Jan 29 13:44:23 crc kubenswrapper[4787]: I0129 13:44:23.076891 4787 scope.go:117] "RemoveContainer" containerID="dc7044db9f24464b95289a083efb431b8b4cd106cf268091236b9976bf84b435" Jan 29 13:44:24 crc kubenswrapper[4787]: I0129 13:44:24.985725 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:44:24 crc kubenswrapper[4787]: E0129 13:44:24.986445 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:44:36 crc kubenswrapper[4787]: I0129 13:44:36.986137 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:44:36 crc kubenswrapper[4787]: E0129 13:44:36.987132 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:44:48 crc kubenswrapper[4787]: I0129 13:44:48.985878 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:44:48 crc kubenswrapper[4787]: E0129 13:44:48.988025 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:44:59 crc kubenswrapper[4787]: I0129 13:44:59.986802 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0" Jan 29 13:44:59 crc kubenswrapper[4787]: E0129 13:44:59.987905 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:45:00 crc 
kubenswrapper[4787]: I0129 13:45:00.167448 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl"] Jan 29 13:45:00 crc kubenswrapper[4787]: E0129 13:45:00.167940 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ab416f-744b-426e-a900-695911fafdb2" containerName="registry-server" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.167962 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ab416f-744b-426e-a900-695911fafdb2" containerName="registry-server" Jan 29 13:45:00 crc kubenswrapper[4787]: E0129 13:45:00.167978 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ab416f-744b-426e-a900-695911fafdb2" containerName="extract-content" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.167988 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ab416f-744b-426e-a900-695911fafdb2" containerName="extract-content" Jan 29 13:45:00 crc kubenswrapper[4787]: E0129 13:45:00.168007 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ab416f-744b-426e-a900-695911fafdb2" containerName="extract-utilities" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.168019 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ab416f-744b-426e-a900-695911fafdb2" containerName="extract-utilities" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.168250 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ab416f-744b-426e-a900-695911fafdb2" containerName="registry-server" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.168958 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.172166 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.172563 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.190626 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl"] Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.210199 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/958a9bbd-27d4-490e-9ae6-eafcae5db33a-secret-volume\") pod \"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.210297 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/958a9bbd-27d4-490e-9ae6-eafcae5db33a-config-volume\") pod \"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.210330 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkgdv\" (UniqueName: \"kubernetes.io/projected/958a9bbd-27d4-490e-9ae6-eafcae5db33a-kube-api-access-tkgdv\") pod 
\"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.311395 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkgdv\" (UniqueName: \"kubernetes.io/projected/958a9bbd-27d4-490e-9ae6-eafcae5db33a-kube-api-access-tkgdv\") pod \"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.311737 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/958a9bbd-27d4-490e-9ae6-eafcae5db33a-secret-volume\") pod \"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.311919 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/958a9bbd-27d4-490e-9ae6-eafcae5db33a-config-volume\") pod \"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.312799 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/958a9bbd-27d4-490e-9ae6-eafcae5db33a-config-volume\") pod \"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.317861 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/958a9bbd-27d4-490e-9ae6-eafcae5db33a-secret-volume\") pod \"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.328408 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkgdv\" (UniqueName: \"kubernetes.io/projected/958a9bbd-27d4-490e-9ae6-eafcae5db33a-kube-api-access-tkgdv\") pod \"collect-profiles-29494905-t5btl\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.505271 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:00 crc kubenswrapper[4787]: I0129 13:45:00.970417 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl"] Jan 29 13:45:01 crc kubenswrapper[4787]: I0129 13:45:01.004086 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" event={"ID":"958a9bbd-27d4-490e-9ae6-eafcae5db33a","Type":"ContainerStarted","Data":"1f0c75266c67d4840a9b87a6088ccbe36134582b3690e02e9b369b482d743ee6"} Jan 29 13:45:02 crc kubenswrapper[4787]: I0129 13:45:02.028900 4787 generic.go:334] "Generic (PLEG): container finished" podID="958a9bbd-27d4-490e-9ae6-eafcae5db33a" containerID="9f532fd3f159dced3d571fe0bd7522a5fdf04afd99159e58bddaaae262e687c6" exitCode=0 Jan 29 13:45:02 crc kubenswrapper[4787]: I0129 13:45:02.029021 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" event={"ID":"958a9bbd-27d4-490e-9ae6-eafcae5db33a","Type":"ContainerDied","Data":"9f532fd3f159dced3d571fe0bd7522a5fdf04afd99159e58bddaaae262e687c6"} Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.290258 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.454528 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/958a9bbd-27d4-490e-9ae6-eafcae5db33a-secret-volume\") pod \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.454650 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/958a9bbd-27d4-490e-9ae6-eafcae5db33a-config-volume\") pod \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.454832 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkgdv\" (UniqueName: \"kubernetes.io/projected/958a9bbd-27d4-490e-9ae6-eafcae5db33a-kube-api-access-tkgdv\") pod \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\" (UID: \"958a9bbd-27d4-490e-9ae6-eafcae5db33a\") " Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.455646 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/958a9bbd-27d4-490e-9ae6-eafcae5db33a-config-volume" (OuterVolumeSpecName: "config-volume") pod "958a9bbd-27d4-490e-9ae6-eafcae5db33a" (UID: "958a9bbd-27d4-490e-9ae6-eafcae5db33a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.462889 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/958a9bbd-27d4-490e-9ae6-eafcae5db33a-kube-api-access-tkgdv" (OuterVolumeSpecName: "kube-api-access-tkgdv") pod "958a9bbd-27d4-490e-9ae6-eafcae5db33a" (UID: "958a9bbd-27d4-490e-9ae6-eafcae5db33a"). InnerVolumeSpecName "kube-api-access-tkgdv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.464871 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/958a9bbd-27d4-490e-9ae6-eafcae5db33a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "958a9bbd-27d4-490e-9ae6-eafcae5db33a" (UID: "958a9bbd-27d4-490e-9ae6-eafcae5db33a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.556323 4787 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/958a9bbd-27d4-490e-9ae6-eafcae5db33a-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.556380 4787 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/958a9bbd-27d4-490e-9ae6-eafcae5db33a-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 13:45:03 crc kubenswrapper[4787]: I0129 13:45:03.556410 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkgdv\" (UniqueName: \"kubernetes.io/projected/958a9bbd-27d4-490e-9ae6-eafcae5db33a-kube-api-access-tkgdv\") on node \"crc\" DevicePath \"\"" Jan 29 13:45:04 crc kubenswrapper[4787]: I0129 13:45:04.044221 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl" event={"ID":"958a9bbd-27d4-490e-9ae6-eafcae5db33a","Type":"ContainerDied","Data":"1f0c75266c67d4840a9b87a6088ccbe36134582b3690e02e9b369b482d743ee6"} Jan 29 13:45:04 crc kubenswrapper[4787]: I0129 13:45:04.044583 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f0c75266c67d4840a9b87a6088ccbe36134582b3690e02e9b369b482d743ee6" Jan 29 13:45:04 crc kubenswrapper[4787]: I0129 13:45:04.044298 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl"
Jan 29 13:45:14 crc kubenswrapper[4787]: I0129 13:45:14.986441 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:45:14 crc kubenswrapper[4787]: E0129 13:45:14.987700 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.133562 4787 scope.go:117] "RemoveContainer" containerID="6a1ee377dcb7020ee48e27605a40b00d9b78bd8ecb94de501fab6bd3df6c95f7"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.173783 4787 scope.go:117] "RemoveContainer" containerID="3ab098aaa3619a2e900d7cada3a9a1250c0529a3012322f8c741285c7f65ce68"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.221235 4787 scope.go:117] "RemoveContainer" containerID="8b95125a5a58499681bf23ee00e08e6866abcec3495ceb7c8a8bc0f7ba0498e2"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.241426 4787 scope.go:117] "RemoveContainer" containerID="d66315da679b94f0778bd7588ad6dc7fdc2f5546aee3df91a887d454cd5b7ba2"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.260410 4787 scope.go:117] "RemoveContainer" containerID="eb129b091c246c6b3efe6a68ff640e44a192a5d03b945968aaf8ecf282346312"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.311549 4787 scope.go:117] "RemoveContainer" containerID="36e79d43872b82fc3af46ec38ec2f9486c3bd213dd9ce4372c2b33fd42a387e4"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.328209 4787 scope.go:117] "RemoveContainer" containerID="b5889b0839f03de550aac1072fe9346c8f6283198062869bde3b352c8aa8ad03"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.343983 4787 scope.go:117] "RemoveContainer" containerID="5c0e1b31ffd86bab49198138d4e0ac050ebf0c35b80508d01ec92940afd2ce53"
Jan 29 13:45:23 crc kubenswrapper[4787]: I0129 13:45:23.379223 4787 scope.go:117] "RemoveContainer" containerID="dd512a8155b6324cad09bf4250a8dc3eb9e30fa7e84be8ddea6199714e1a72ab"
Jan 29 13:45:28 crc kubenswrapper[4787]: I0129 13:45:28.985853 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:45:28 crc kubenswrapper[4787]: E0129 13:45:28.986369 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:45:40 crc kubenswrapper[4787]: I0129 13:45:40.986063 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:45:40 crc kubenswrapper[4787]: E0129 13:45:40.987180 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:45:53 crc kubenswrapper[4787]: I0129 13:45:53.985819 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:45:53 crc kubenswrapper[4787]: E0129 13:45:53.986488 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:46:06 crc kubenswrapper[4787]: I0129 13:46:06.986413 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:46:07 crc kubenswrapper[4787]: I0129 13:46:07.583395 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"cddddbe85171c69ecf6f2df287a612ec33802c41c5c0658d6f8b713cd24abc5c"}
Jan 29 13:46:23 crc kubenswrapper[4787]: I0129 13:46:23.508848 4787 scope.go:117] "RemoveContainer" containerID="ba639ee3ff7377d69eaf774b671ae0c100eaa7448c3c876737ed5dc86aecd94f"
Jan 29 13:46:23 crc kubenswrapper[4787]: I0129 13:46:23.543491 4787 scope.go:117] "RemoveContainer" containerID="e36b1605a3b04ed9fbf874c7a5383659083ef860921f7d81eb3be81f257cfe63"
Jan 29 13:46:23 crc kubenswrapper[4787]: I0129 13:46:23.568642 4787 scope.go:117] "RemoveContainer" containerID="a8692568c18184c00d2da8446d64f8b935631ac2dbe63cd7cc1211e1a04eae5d"
Jan 29 13:46:23 crc kubenswrapper[4787]: I0129 13:46:23.590585 4787 scope.go:117] "RemoveContainer" containerID="2ed40eb4eb2d3cdf467d437f74046a37ba0764a96809f702ca6b5a682ad85043"
Jan 29 13:46:23 crc kubenswrapper[4787]: I0129 13:46:23.614583 4787 scope.go:117] "RemoveContainer" containerID="29662081c93b7af728cfcfbd960a518ebc0b467e27dbe1770dd64d186b551e99"
Jan 29 13:48:28 crc kubenswrapper[4787]: I0129 13:48:28.394748 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:48:28 crc kubenswrapper[4787]: I0129 13:48:28.395362 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:48:58 crc kubenswrapper[4787]: I0129 13:48:58.394988 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:48:58 crc kubenswrapper[4787]: I0129 13:48:58.395725 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:49:28 crc kubenswrapper[4787]: I0129 13:49:28.395023 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:49:28 crc kubenswrapper[4787]: I0129 13:49:28.397293 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:49:28 crc kubenswrapper[4787]: I0129 13:49:28.397356 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn"
Jan 29 13:49:28 crc kubenswrapper[4787]: I0129 13:49:28.398047 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cddddbe85171c69ecf6f2df287a612ec33802c41c5c0658d6f8b713cd24abc5c"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 13:49:28 crc kubenswrapper[4787]: I0129 13:49:28.398127 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://cddddbe85171c69ecf6f2df287a612ec33802c41c5c0658d6f8b713cd24abc5c" gracePeriod=600
Jan 29 13:49:29 crc kubenswrapper[4787]: I0129 13:49:29.326593 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="cddddbe85171c69ecf6f2df287a612ec33802c41c5c0658d6f8b713cd24abc5c" exitCode=0
Jan 29 13:49:29 crc kubenswrapper[4787]: I0129 13:49:29.326770 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"cddddbe85171c69ecf6f2df287a612ec33802c41c5c0658d6f8b713cd24abc5c"}
Jan 29 13:49:29 crc kubenswrapper[4787]: I0129 13:49:29.327171 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57"}
Jan 29 13:49:29 crc kubenswrapper[4787]: I0129 13:49:29.327217 4787 scope.go:117] "RemoveContainer" containerID="82ca23063690d87b8e7facb8fa091bf5b0748249c28857ba16c91c1e1c0ff6b0"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.186303 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mjzmv"]
Jan 29 13:50:10 crc kubenswrapper[4787]: E0129 13:50:10.187817 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="958a9bbd-27d4-490e-9ae6-eafcae5db33a" containerName="collect-profiles"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.187855 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="958a9bbd-27d4-490e-9ae6-eafcae5db33a" containerName="collect-profiles"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.188222 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="958a9bbd-27d4-490e-9ae6-eafcae5db33a" containerName="collect-profiles"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.190885 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.203966 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mjzmv"]
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.335726 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-catalog-content\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.335820 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-utilities\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.335891 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5756j\" (UniqueName: \"kubernetes.io/projected/23ed2830-22f2-4c9d-bd16-112bd7c0420a-kube-api-access-5756j\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.437006 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-catalog-content\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.437325 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-utilities\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.437356 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5756j\" (UniqueName: \"kubernetes.io/projected/23ed2830-22f2-4c9d-bd16-112bd7c0420a-kube-api-access-5756j\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.437481 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-catalog-content\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.437786 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-utilities\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.456102 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5756j\" (UniqueName: \"kubernetes.io/projected/23ed2830-22f2-4c9d-bd16-112bd7c0420a-kube-api-access-5756j\") pod \"certified-operators-mjzmv\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") " pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:10 crc kubenswrapper[4787]: I0129 13:50:10.516335 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:11 crc kubenswrapper[4787]: I0129 13:50:11.014604 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mjzmv"]
Jan 29 13:50:11 crc kubenswrapper[4787]: I0129 13:50:11.672999 4787 generic.go:334] "Generic (PLEG): container finished" podID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerID="2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a" exitCode=0
Jan 29 13:50:11 crc kubenswrapper[4787]: I0129 13:50:11.673047 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mjzmv" event={"ID":"23ed2830-22f2-4c9d-bd16-112bd7c0420a","Type":"ContainerDied","Data":"2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a"}
Jan 29 13:50:11 crc kubenswrapper[4787]: I0129 13:50:11.673083 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mjzmv" event={"ID":"23ed2830-22f2-4c9d-bd16-112bd7c0420a","Type":"ContainerStarted","Data":"c5c6d0503e61d30bcdb44093d21b326b6d8d043e2c4949cf2dcf09b9b0437c9f"}
Jan 29 13:50:11 crc kubenswrapper[4787]: I0129 13:50:11.675062 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.568781 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gbt26"]
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.570185 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.579569 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-catalog-content\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.579648 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kd4t\" (UniqueName: \"kubernetes.io/projected/292ebfa4-d772-49e4-a4c8-ee239da5cc90-kube-api-access-4kd4t\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.579686 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-utilities\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.586742 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gbt26"]
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.680831 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-catalog-content\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.680883 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kd4t\" (UniqueName: \"kubernetes.io/projected/292ebfa4-d772-49e4-a4c8-ee239da5cc90-kube-api-access-4kd4t\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.680906 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-utilities\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.681486 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-utilities\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.681713 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-catalog-content\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.713724 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kd4t\" (UniqueName: \"kubernetes.io/projected/292ebfa4-d772-49e4-a4c8-ee239da5cc90-kube-api-access-4kd4t\") pod \"community-operators-gbt26\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") " pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:12 crc kubenswrapper[4787]: I0129 13:50:12.909494 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:13 crc kubenswrapper[4787]: I0129 13:50:13.234719 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gbt26"]
Jan 29 13:50:13 crc kubenswrapper[4787]: W0129 13:50:13.236014 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod292ebfa4_d772_49e4_a4c8_ee239da5cc90.slice/crio-16f8a810ef09c3ccfd61348aee9c04d802812e1868f2230a87897a1e4a974ae6 WatchSource:0}: Error finding container 16f8a810ef09c3ccfd61348aee9c04d802812e1868f2230a87897a1e4a974ae6: Status 404 returned error can't find the container with id 16f8a810ef09c3ccfd61348aee9c04d802812e1868f2230a87897a1e4a974ae6
Jan 29 13:50:13 crc kubenswrapper[4787]: I0129 13:50:13.691324 4787 generic.go:334] "Generic (PLEG): container finished" podID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerID="f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876" exitCode=0
Jan 29 13:50:13 crc kubenswrapper[4787]: I0129 13:50:13.691427 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mjzmv" event={"ID":"23ed2830-22f2-4c9d-bd16-112bd7c0420a","Type":"ContainerDied","Data":"f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876"}
Jan 29 13:50:13 crc kubenswrapper[4787]: I0129 13:50:13.702080 4787 generic.go:334] "Generic (PLEG): container finished" podID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerID="bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e" exitCode=0
Jan 29 13:50:13 crc kubenswrapper[4787]: I0129 13:50:13.702153 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gbt26" event={"ID":"292ebfa4-d772-49e4-a4c8-ee239da5cc90","Type":"ContainerDied","Data":"bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e"}
Jan 29 13:50:13 crc kubenswrapper[4787]: I0129 13:50:13.702208 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gbt26" event={"ID":"292ebfa4-d772-49e4-a4c8-ee239da5cc90","Type":"ContainerStarted","Data":"16f8a810ef09c3ccfd61348aee9c04d802812e1868f2230a87897a1e4a974ae6"}
Jan 29 13:50:14 crc kubenswrapper[4787]: I0129 13:50:14.710614 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gbt26" event={"ID":"292ebfa4-d772-49e4-a4c8-ee239da5cc90","Type":"ContainerStarted","Data":"5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75"}
Jan 29 13:50:14 crc kubenswrapper[4787]: I0129 13:50:14.712707 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mjzmv" event={"ID":"23ed2830-22f2-4c9d-bd16-112bd7c0420a","Type":"ContainerStarted","Data":"fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696"}
Jan 29 13:50:14 crc kubenswrapper[4787]: I0129 13:50:14.774876 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mjzmv" podStartSLOduration=2.320824348 podStartE2EDuration="4.774859324s" podCreationTimestamp="2026-01-29 13:50:10 +0000 UTC" firstStartedPulling="2026-01-29 13:50:11.674573431 +0000 UTC m=+2050.435833747" lastFinishedPulling="2026-01-29 13:50:14.128608447 +0000 UTC m=+2052.889868723" observedRunningTime="2026-01-29 13:50:14.773056653 +0000 UTC m=+2053.534316939" watchObservedRunningTime="2026-01-29 13:50:14.774859324 +0000 UTC m=+2053.536119600"
Jan 29 13:50:15 crc kubenswrapper[4787]: I0129 13:50:15.723438 4787 generic.go:334] "Generic (PLEG): container finished" podID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerID="5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75" exitCode=0
Jan 29 13:50:15 crc kubenswrapper[4787]: I0129 13:50:15.724795 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gbt26" event={"ID":"292ebfa4-d772-49e4-a4c8-ee239da5cc90","Type":"ContainerDied","Data":"5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75"}
Jan 29 13:50:17 crc kubenswrapper[4787]: I0129 13:50:17.742367 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gbt26" event={"ID":"292ebfa4-d772-49e4-a4c8-ee239da5cc90","Type":"ContainerStarted","Data":"448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a"}
Jan 29 13:50:17 crc kubenswrapper[4787]: I0129 13:50:17.769493 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gbt26" podStartSLOduration=2.184205283 podStartE2EDuration="5.769471932s" podCreationTimestamp="2026-01-29 13:50:12 +0000 UTC" firstStartedPulling="2026-01-29 13:50:13.704072024 +0000 UTC m=+2052.465332300" lastFinishedPulling="2026-01-29 13:50:17.289338663 +0000 UTC m=+2056.050598949" observedRunningTime="2026-01-29 13:50:17.759992923 +0000 UTC m=+2056.521253219" watchObservedRunningTime="2026-01-29 13:50:17.769471932 +0000 UTC m=+2056.530732218"
Jan 29 13:50:20 crc kubenswrapper[4787]: I0129 13:50:20.517262 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:20 crc kubenswrapper[4787]: I0129 13:50:20.517792 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:20 crc kubenswrapper[4787]: I0129 13:50:20.575061 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:20 crc kubenswrapper[4787]: I0129 13:50:20.817812 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:21 crc kubenswrapper[4787]: I0129 13:50:21.565111 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mjzmv"]
Jan 29 13:50:22 crc kubenswrapper[4787]: I0129 13:50:22.783045 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mjzmv" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerName="registry-server" containerID="cri-o://fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696" gracePeriod=2
Jan 29 13:50:22 crc kubenswrapper[4787]: I0129 13:50:22.910152 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:22 crc kubenswrapper[4787]: I0129 13:50:22.910221 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:22 crc kubenswrapper[4787]: I0129 13:50:22.957323 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.201603 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.344636 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-utilities\") pod \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") "
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.344692 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-catalog-content\") pod \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") "
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.344806 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5756j\" (UniqueName: \"kubernetes.io/projected/23ed2830-22f2-4c9d-bd16-112bd7c0420a-kube-api-access-5756j\") pod \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\" (UID: \"23ed2830-22f2-4c9d-bd16-112bd7c0420a\") "
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.345432 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-utilities" (OuterVolumeSpecName: "utilities") pod "23ed2830-22f2-4c9d-bd16-112bd7c0420a" (UID: "23ed2830-22f2-4c9d-bd16-112bd7c0420a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.350542 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23ed2830-22f2-4c9d-bd16-112bd7c0420a-kube-api-access-5756j" (OuterVolumeSpecName: "kube-api-access-5756j") pod "23ed2830-22f2-4c9d-bd16-112bd7c0420a" (UID: "23ed2830-22f2-4c9d-bd16-112bd7c0420a"). InnerVolumeSpecName "kube-api-access-5756j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.425874 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "23ed2830-22f2-4c9d-bd16-112bd7c0420a" (UID: "23ed2830-22f2-4c9d-bd16-112bd7c0420a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.446177 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.446362 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23ed2830-22f2-4c9d-bd16-112bd7c0420a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.446444 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5756j\" (UniqueName: \"kubernetes.io/projected/23ed2830-22f2-4c9d-bd16-112bd7c0420a-kube-api-access-5756j\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.793305 4787 generic.go:334] "Generic (PLEG): container finished" podID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerID="fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696" exitCode=0
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.793404 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mjzmv"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.793420 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mjzmv" event={"ID":"23ed2830-22f2-4c9d-bd16-112bd7c0420a","Type":"ContainerDied","Data":"fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696"}
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.793557 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mjzmv" event={"ID":"23ed2830-22f2-4c9d-bd16-112bd7c0420a","Type":"ContainerDied","Data":"c5c6d0503e61d30bcdb44093d21b326b6d8d043e2c4949cf2dcf09b9b0437c9f"}
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.793623 4787 scope.go:117] "RemoveContainer" containerID="fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.841497 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mjzmv"]
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.845897 4787 scope.go:117] "RemoveContainer" containerID="f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.852731 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mjzmv"]
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.876863 4787 scope.go:117] "RemoveContainer" containerID="2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.881138 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.910323 4787 scope.go:117] "RemoveContainer" containerID="fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696"
Jan 29 13:50:23 crc kubenswrapper[4787]: E0129 13:50:23.910985 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696\": container with ID starting with fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696 not found: ID does not exist" containerID="fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.911050 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696"} err="failed to get container status \"fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696\": rpc error: code = NotFound desc = could not find container \"fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696\": container with ID starting with fc40993f99a6d2fb4067531f2fb679bb893135f20631cbb34b20efce6c61b696 not found: ID does not exist"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.911090 4787 scope.go:117] "RemoveContainer" containerID="f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876"
Jan 29 13:50:23 crc kubenswrapper[4787]: E0129 13:50:23.911571 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876\": container with ID starting with f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876 not found: ID does not exist" containerID="f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.911610 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876"} err="failed to get container status \"f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876\": rpc error: code = NotFound desc = could not find container \"f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876\": container with ID starting with f5f543efeef3bcf7a881e8df6237d62528c09d6c01f78f8eb3acc0b933233876 not found: ID does not exist"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.911642 4787 scope.go:117] "RemoveContainer" containerID="2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a"
Jan 29 13:50:23 crc kubenswrapper[4787]: E0129 13:50:23.912003 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a\": container with ID starting with 2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a not found: ID does not exist" containerID="2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.912027 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a"} err="failed to get container status \"2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a\": rpc error: code = NotFound desc = could not find container \"2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a\": container with ID starting with 2150de4a49c34f69aa758604803fbf6d646bbe00c17660d71d6c9631720b5f6a not found: ID does not exist"
Jan 29 13:50:23 crc kubenswrapper[4787]: I0129 13:50:23.996280 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" path="/var/lib/kubelet/pods/23ed2830-22f2-4c9d-bd16-112bd7c0420a/volumes"
Jan 29 13:50:25 crc kubenswrapper[4787]: I0129 13:50:25.360612 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gbt26"]
Jan 29 13:50:25 crc kubenswrapper[4787]: I0129 13:50:25.810843 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gbt26" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerName="registry-server" containerID="cri-o://448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a" gracePeriod=2
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.165805 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.292444 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-utilities\") pod \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") "
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.293330 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kd4t\" (UniqueName: \"kubernetes.io/projected/292ebfa4-d772-49e4-a4c8-ee239da5cc90-kube-api-access-4kd4t\") pod \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") "
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.293546 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-utilities" (OuterVolumeSpecName: "utilities") pod "292ebfa4-d772-49e4-a4c8-ee239da5cc90" (UID: "292ebfa4-d772-49e4-a4c8-ee239da5cc90"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.294681 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-catalog-content\") pod \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\" (UID: \"292ebfa4-d772-49e4-a4c8-ee239da5cc90\") "
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.295152 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.298426 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/292ebfa4-d772-49e4-a4c8-ee239da5cc90-kube-api-access-4kd4t" (OuterVolumeSpecName: "kube-api-access-4kd4t") pod "292ebfa4-d772-49e4-a4c8-ee239da5cc90" (UID: "292ebfa4-d772-49e4-a4c8-ee239da5cc90"). InnerVolumeSpecName "kube-api-access-4kd4t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.351949 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "292ebfa4-d772-49e4-a4c8-ee239da5cc90" (UID: "292ebfa4-d772-49e4-a4c8-ee239da5cc90"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.396572 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kd4t\" (UniqueName: \"kubernetes.io/projected/292ebfa4-d772-49e4-a4c8-ee239da5cc90-kube-api-access-4kd4t\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.397648 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/292ebfa4-d772-49e4-a4c8-ee239da5cc90-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.820021 4787 generic.go:334] "Generic (PLEG): container finished" podID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerID="448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a" exitCode=0
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.820084 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gbt26" event={"ID":"292ebfa4-d772-49e4-a4c8-ee239da5cc90","Type":"ContainerDied","Data":"448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a"}
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.820100 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gbt26"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.820133 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gbt26" event={"ID":"292ebfa4-d772-49e4-a4c8-ee239da5cc90","Type":"ContainerDied","Data":"16f8a810ef09c3ccfd61348aee9c04d802812e1868f2230a87897a1e4a974ae6"}
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.820164 4787 scope.go:117] "RemoveContainer" containerID="448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.841446 4787 scope.go:117] "RemoveContainer" containerID="5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.870229 4787 scope.go:117] "RemoveContainer" containerID="bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.874677 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gbt26"]
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.879394 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gbt26"]
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.900826 4787 scope.go:117] "RemoveContainer" containerID="448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a"
Jan 29 13:50:26 crc kubenswrapper[4787]: E0129 13:50:26.901331 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a\": container with ID starting with 448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a not found: ID does not exist" containerID="448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.901385 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a"} err="failed to get container status \"448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a\": rpc error: code = NotFound desc = could not find container \"448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a\": container with ID starting with 448483f24364c18c26153031c88e16eec05eba9708435fe2804482e94467816a not found: ID does not exist"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.901412 4787 scope.go:117] "RemoveContainer" containerID="5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75"
Jan 29 13:50:26 crc kubenswrapper[4787]: E0129 13:50:26.901672 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75\": container with ID starting with 5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75 not found: ID does not exist" containerID="5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.901718 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75"} err="failed to get container status \"5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75\": rpc error: code = NotFound desc = could not find container \"5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75\": container with ID starting with 5a0ba8f09878d929e687b473d4904ea18141399085eb27d4ac787a596cfcba75 not found: ID does not exist"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.901735 4787 scope.go:117] "RemoveContainer" containerID="bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e"
Jan 29 13:50:26 crc kubenswrapper[4787]: E0129 13:50:26.902212 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e\": container with ID starting with bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e not found: ID does not exist" containerID="bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e"
Jan 29 13:50:26 crc kubenswrapper[4787]: I0129 13:50:26.902261 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e"} err="failed to get container status \"bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e\": rpc error: code = NotFound desc = could not find container \"bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e\": container with ID starting with bfce1077b571245183800061a27d758b5fca9d0c98e61895b6f73a076b84110e not found: ID does not exist"
Jan 29 13:50:27 crc kubenswrapper[4787]: I0129 13:50:27.996860 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" path="/var/lib/kubelet/pods/292ebfa4-d772-49e4-a4c8-ee239da5cc90/volumes"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775099 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-d2j68"]
Jan 29 13:50:28 crc kubenswrapper[4787]: E0129 13:50:28.775537 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerName="extract-content"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775565 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerName="extract-content"
Jan 29 13:50:28 crc kubenswrapper[4787]: E0129 13:50:28.775613 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerName="extract-utilities"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775624 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerName="extract-utilities"
Jan 29 13:50:28 crc kubenswrapper[4787]: E0129 13:50:28.775651 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerName="extract-content"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775662 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerName="extract-content"
Jan 29 13:50:28 crc kubenswrapper[4787]: E0129 13:50:28.775683 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerName="registry-server"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775693 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerName="registry-server"
Jan 29 13:50:28 crc kubenswrapper[4787]: E0129 13:50:28.775714 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerName="registry-server"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775725 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerName="registry-server"
Jan 29 13:50:28 crc kubenswrapper[4787]: E0129 13:50:28.775739 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerName="extract-utilities"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775750 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerName="extract-utilities"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775969 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="23ed2830-22f2-4c9d-bd16-112bd7c0420a" containerName="registry-server"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.775992 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="292ebfa4-d772-49e4-a4c8-ee239da5cc90" containerName="registry-server"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.777665 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.786080 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-d2j68"]
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.932760 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-catalog-content\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.933077 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-utilities\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:28 crc kubenswrapper[4787]: I0129 13:50:28.933272 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ll2mt\" (UniqueName: \"kubernetes.io/projected/03f79d23-a349-4e59-992e-3fb24999ba3b-kube-api-access-ll2mt\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.035002 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-utilities\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.035139 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ll2mt\" (UniqueName: \"kubernetes.io/projected/03f79d23-a349-4e59-992e-3fb24999ba3b-kube-api-access-ll2mt\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.035208 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-catalog-content\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.035497 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-utilities\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.035552 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-catalog-content\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.054349 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ll2mt\" (UniqueName: \"kubernetes.io/projected/03f79d23-a349-4e59-992e-3fb24999ba3b-kube-api-access-ll2mt\") pod \"redhat-operators-d2j68\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") " pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.111886 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.534799 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-d2j68"]
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.846234 4787 generic.go:334] "Generic (PLEG): container finished" podID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerID="f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6" exitCode=0
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.846281 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d2j68" event={"ID":"03f79d23-a349-4e59-992e-3fb24999ba3b","Type":"ContainerDied","Data":"f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6"}
Jan 29 13:50:29 crc kubenswrapper[4787]: I0129 13:50:29.846312 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d2j68" event={"ID":"03f79d23-a349-4e59-992e-3fb24999ba3b","Type":"ContainerStarted","Data":"218a5b3d3cef076b926befc1ea92ef14a7f01939b140d6f678d8e6945dd25e27"}
Jan 29 13:50:31 crc kubenswrapper[4787]: I0129 13:50:31.867379 4787 generic.go:334] "Generic (PLEG): container finished" podID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerID="bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803" exitCode=0
Jan 29 13:50:31 crc kubenswrapper[4787]: I0129 13:50:31.867495 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d2j68" event={"ID":"03f79d23-a349-4e59-992e-3fb24999ba3b","Type":"ContainerDied","Data":"bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803"}
Jan 29 13:50:32 crc kubenswrapper[4787]: I0129 13:50:32.888711 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d2j68" event={"ID":"03f79d23-a349-4e59-992e-3fb24999ba3b","Type":"ContainerStarted","Data":"31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12"}
Jan 29 13:50:32 crc kubenswrapper[4787]: I0129 13:50:32.917267 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-d2j68" podStartSLOduration=2.328828011 podStartE2EDuration="4.917244257s" podCreationTimestamp="2026-01-29 13:50:28 +0000 UTC" firstStartedPulling="2026-01-29 13:50:29.848375993 +0000 UTC m=+2068.609636269" lastFinishedPulling="2026-01-29 13:50:32.436792189 +0000 UTC m=+2071.198052515" observedRunningTime="2026-01-29 13:50:32.913091379 +0000 UTC m=+2071.674351685" watchObservedRunningTime="2026-01-29 13:50:32.917244257 +0000 UTC m=+2071.678504543"
Jan 29 13:50:39 crc kubenswrapper[4787]: I0129 13:50:39.113071 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:39 crc kubenswrapper[4787]: I0129 13:50:39.113417 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:40 crc kubenswrapper[4787]: I0129 13:50:40.158311 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-d2j68" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="registry-server" probeResult="failure" output=<
Jan 29 13:50:40 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s
Jan 29 13:50:40 crc kubenswrapper[4787]: >
Jan 29 13:50:49 crc kubenswrapper[4787]: I0129 13:50:49.187942 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:49 crc kubenswrapper[4787]: I0129 13:50:49.248692 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:49 crc kubenswrapper[4787]: I0129 13:50:49.435100 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-d2j68"]
Jan 29 13:50:51 crc kubenswrapper[4787]: I0129 13:50:51.044960 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-d2j68" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="registry-server" containerID="cri-o://31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12" gracePeriod=2
Jan 29 13:50:51 crc kubenswrapper[4787]: I0129 13:50:51.949497 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:51 crc kubenswrapper[4787]: I0129 13:50:51.992664 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-utilities\") pod \"03f79d23-a349-4e59-992e-3fb24999ba3b\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") "
Jan 29 13:50:51 crc kubenswrapper[4787]: I0129 13:50:51.992709 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ll2mt\" (UniqueName: \"kubernetes.io/projected/03f79d23-a349-4e59-992e-3fb24999ba3b-kube-api-access-ll2mt\") pod \"03f79d23-a349-4e59-992e-3fb24999ba3b\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") "
Jan 29 13:50:51 crc kubenswrapper[4787]: I0129 13:50:51.992792 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-catalog-content\") pod \"03f79d23-a349-4e59-992e-3fb24999ba3b\" (UID: \"03f79d23-a349-4e59-992e-3fb24999ba3b\") "
Jan 29 13:50:51 crc kubenswrapper[4787]: I0129 13:50:51.993420 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-utilities" (OuterVolumeSpecName: "utilities") pod "03f79d23-a349-4e59-992e-3fb24999ba3b" (UID: "03f79d23-a349-4e59-992e-3fb24999ba3b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.005771 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03f79d23-a349-4e59-992e-3fb24999ba3b-kube-api-access-ll2mt" (OuterVolumeSpecName: "kube-api-access-ll2mt") pod "03f79d23-a349-4e59-992e-3fb24999ba3b" (UID: "03f79d23-a349-4e59-992e-3fb24999ba3b"). InnerVolumeSpecName "kube-api-access-ll2mt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.058055 4787 generic.go:334] "Generic (PLEG): container finished" podID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerID="31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12" exitCode=0
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.058098 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d2j68" event={"ID":"03f79d23-a349-4e59-992e-3fb24999ba3b","Type":"ContainerDied","Data":"31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12"}
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.058125 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d2j68" event={"ID":"03f79d23-a349-4e59-992e-3fb24999ba3b","Type":"ContainerDied","Data":"218a5b3d3cef076b926befc1ea92ef14a7f01939b140d6f678d8e6945dd25e27"}
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.058142 4787 scope.go:117] "RemoveContainer" containerID="31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.058247 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-d2j68"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.086092 4787 scope.go:117] "RemoveContainer" containerID="bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.093853 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.093872 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ll2mt\" (UniqueName: \"kubernetes.io/projected/03f79d23-a349-4e59-992e-3fb24999ba3b-kube-api-access-ll2mt\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.117638 4787 scope.go:117] "RemoveContainer" containerID="f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.135447 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "03f79d23-a349-4e59-992e-3fb24999ba3b" (UID: "03f79d23-a349-4e59-992e-3fb24999ba3b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.137332 4787 scope.go:117] "RemoveContainer" containerID="31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12"
Jan 29 13:50:52 crc kubenswrapper[4787]: E0129 13:50:52.137735 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12\": container with ID starting with 31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12 not found: ID does not exist" containerID="31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.137770 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12"} err="failed to get container status \"31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12\": rpc error: code = NotFound desc = could not find container \"31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12\": container with ID starting with 31046ee6876d367f0e60d0c4ad2274e677c9b9b8a553d987c330ef0fb8d27b12 not found: ID does not exist"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.137796 4787 scope.go:117] "RemoveContainer" containerID="bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803"
Jan 29 13:50:52 crc kubenswrapper[4787]: E0129 13:50:52.138246 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803\": container with ID starting with bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803 not found: ID does not exist" containerID="bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.138297 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803"} err="failed to get container status \"bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803\": rpc error: code = NotFound desc = could not find container \"bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803\": container with ID starting with bdbbbd5a4810f55507c7eb359900ec0621b793b5a4773cd3b273de3e32d20803 not found: ID does not exist"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.138337 4787 scope.go:117] "RemoveContainer" containerID="f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6"
Jan 29 13:50:52 crc kubenswrapper[4787]: E0129 13:50:52.138820 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6\": container with ID starting with f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6 not found: ID does not exist" containerID="f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.138873 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6"} err="failed to get container status \"f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6\": rpc error: code = NotFound desc = could not find container \"f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6\": container with ID starting with f21a08d53f1a809551333f92b88014abe61fa186a385811cf1c6b93cdff2a7e6 not found: ID does not exist"
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.195032 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f79d23-a349-4e59-992e-3fb24999ba3b-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.417315 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-d2j68"]
Jan 29 13:50:52 crc kubenswrapper[4787]: I0129 13:50:52.424423 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-d2j68"]
Jan 29 13:50:54 crc kubenswrapper[4787]: I0129 13:50:54.002425 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" path="/var/lib/kubelet/pods/03f79d23-a349-4e59-992e-3fb24999ba3b/volumes"
Jan 29 13:51:28 crc kubenswrapper[4787]: I0129 13:51:28.394604 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:51:28 crc kubenswrapper[4787]: I0129 13:51:28.395039 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:51:58 crc kubenswrapper[4787]: I0129 13:51:58.394356 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:51:58 crc kubenswrapper[4787]: I0129 13:51:58.395045 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.394933 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.395537 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.395586 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn"
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.396220 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.396271 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" gracePeriod=600
Jan 29 13:52:28 crc kubenswrapper[4787]: E0129 13:52:28.520105 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.625275 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" exitCode=0
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.625347 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57"}
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.625708 4787 scope.go:117] "RemoveContainer" containerID="cddddbe85171c69ecf6f2df287a612ec33802c41c5c0658d6f8b713cd24abc5c"
Jan 29 13:52:28 crc kubenswrapper[4787]: I0129 13:52:28.626178 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57"
Jan 29 13:52:28 crc kubenswrapper[4787]: E0129 13:52:28.626443 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:52:41 crc kubenswrapper[4787]: I0129 13:52:41.990038 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57"
Jan 29 13:52:41 crc kubenswrapper[4787]: E0129 13:52:41.990799 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 13:52:56 crc kubenswrapper[4787]: I0129 13:52:56.985831 4787 scope.go:117] "RemoveContainer"
containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:52:56 crc kubenswrapper[4787]: E0129 13:52:56.986813 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:57.999604 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-g7c6b"] Jan 29 13:52:58 crc kubenswrapper[4787]: E0129 13:52:58.000335 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="extract-utilities" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.000358 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="extract-utilities" Jan 29 13:52:58 crc kubenswrapper[4787]: E0129 13:52:58.000413 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="registry-server" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.000426 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="registry-server" Jan 29 13:52:58 crc kubenswrapper[4787]: E0129 13:52:58.000450 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="extract-content" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.000519 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="extract-content" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.000732 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="03f79d23-a349-4e59-992e-3fb24999ba3b" containerName="registry-server" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.002345 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.069989 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g7c6b"] Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.095752 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddqmn\" (UniqueName: \"kubernetes.io/projected/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-kube-api-access-ddqmn\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.096240 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-catalog-content\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.097420 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-utilities\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.198833 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddqmn\" (UniqueName: \"kubernetes.io/projected/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-kube-api-access-ddqmn\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.198920 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-catalog-content\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.198943 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-utilities\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.199485 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-utilities\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.199708 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-catalog-content\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.226387 4787 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ddqmn\" (UniqueName: \"kubernetes.io/projected/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-kube-api-access-ddqmn\") pod \"redhat-marketplace-g7c6b\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.376922 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.815687 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g7c6b"] Jan 29 13:52:58 crc kubenswrapper[4787]: I0129 13:52:58.867187 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g7c6b" event={"ID":"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4","Type":"ContainerStarted","Data":"76ec4dd09bf1da233029318da543c4d7fd415d4a5201a8abc509ec583b4e1a8a"} Jan 29 13:52:59 crc kubenswrapper[4787]: I0129 13:52:59.880438 4787 generic.go:334] "Generic (PLEG): container finished" podID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerID="89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a" exitCode=0 Jan 29 13:52:59 crc kubenswrapper[4787]: I0129 13:52:59.880526 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g7c6b" event={"ID":"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4","Type":"ContainerDied","Data":"89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a"} Jan 29 13:53:01 crc kubenswrapper[4787]: I0129 13:53:01.899954 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g7c6b" event={"ID":"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4","Type":"ContainerDied","Data":"b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c"} Jan 29 13:53:01 crc kubenswrapper[4787]: I0129 13:53:01.899870 4787 generic.go:334] "Generic (PLEG): container finished" podID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerID="b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c" exitCode=0 Jan 29 13:53:03 crc kubenswrapper[4787]: I0129 13:53:03.920216 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g7c6b" event={"ID":"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4","Type":"ContainerStarted","Data":"3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3"} Jan 29 13:53:03 crc kubenswrapper[4787]: I0129 13:53:03.942344 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-g7c6b" podStartSLOduration=4.022279773 podStartE2EDuration="6.94232295s" podCreationTimestamp="2026-01-29 13:52:57 +0000 UTC" firstStartedPulling="2026-01-29 13:52:59.882236904 +0000 UTC m=+2218.643497210" lastFinishedPulling="2026-01-29 13:53:02.802280111 +0000 UTC m=+2221.563540387" observedRunningTime="2026-01-29 13:53:03.938401689 +0000 UTC m=+2222.699661975" watchObservedRunningTime="2026-01-29 13:53:03.94232295 +0000 UTC m=+2222.703583226" Jan 29 13:53:08 crc kubenswrapper[4787]: I0129 13:53:08.377814 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:53:08 crc kubenswrapper[4787]: I0129 13:53:08.379622 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:53:08 crc kubenswrapper[4787]: I0129 13:53:08.440639 4787 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:53:09 crc kubenswrapper[4787]: I0129 13:53:09.012665 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:53:09 crc kubenswrapper[4787]: I0129 13:53:09.057720 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g7c6b"] Jan 29 13:53:10 crc kubenswrapper[4787]: I0129 13:53:10.971382 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-g7c6b" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerName="registry-server" containerID="cri-o://3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3" gracePeriod=2 Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.352384 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.512328 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddqmn\" (UniqueName: \"kubernetes.io/projected/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-kube-api-access-ddqmn\") pod \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.512402 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-utilities\") pod \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.512514 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-catalog-content\") pod \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\" (UID: \"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4\") " Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.513672 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-utilities" (OuterVolumeSpecName: "utilities") pod "c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" (UID: "c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.518373 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-kube-api-access-ddqmn" (OuterVolumeSpecName: "kube-api-access-ddqmn") pod "c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" (UID: "c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4"). InnerVolumeSpecName "kube-api-access-ddqmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.536887 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" (UID: "c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.614240 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddqmn\" (UniqueName: \"kubernetes.io/projected/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-kube-api-access-ddqmn\") on node \"crc\" DevicePath \"\"" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.614272 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.614284 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.981916 4787 generic.go:334] "Generic (PLEG): container finished" podID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerID="3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3" exitCode=0 Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.981971 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g7c6b" event={"ID":"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4","Type":"ContainerDied","Data":"3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3"} Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.982004 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g7c6b" event={"ID":"c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4","Type":"ContainerDied","Data":"76ec4dd09bf1da233029318da543c4d7fd415d4a5201a8abc509ec583b4e1a8a"} Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.982024 4787 scope.go:117] "RemoveContainer" containerID="3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.982158 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g7c6b" Jan 29 13:53:11 crc kubenswrapper[4787]: I0129 13:53:11.986668 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:53:11 crc kubenswrapper[4787]: E0129 13:53:11.986924 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.013011 4787 scope.go:117] "RemoveContainer" containerID="b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c" Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.024998 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g7c6b"] Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.030396 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-g7c6b"] Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.042697 4787 scope.go:117] "RemoveContainer" containerID="89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a" Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.062895 4787 scope.go:117] "RemoveContainer" containerID="3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3" Jan 29 13:53:12 crc kubenswrapper[4787]: E0129 13:53:12.063784 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3\": container with ID starting with 3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3 not found: ID does not exist" containerID="3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3" Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.063832 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3"} err="failed to get container status \"3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3\": rpc error: code = NotFound desc = could not find container \"3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3\": container with ID starting with 3e15af2eaa772e25cefb3ac2b1fc5cd4ced874eb69101fb34f23f6c30797b0a3 not found: ID does not exist" Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.063862 4787 scope.go:117] "RemoveContainer" containerID="b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c" Jan 29 13:53:12 crc kubenswrapper[4787]: E0129 13:53:12.064209 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c\": container with ID starting with b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c not found: ID does not exist" containerID="b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c" Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.064328 4787 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c"} err="failed to get container status \"b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c\": rpc error: code = NotFound desc = could not find container \"b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c\": container with ID starting with b336f69ae8ee6e313e092f59680b8a06913270b2f97eca50f915c4c67cf0e01c not found: ID does not exist" Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.064442 4787 scope.go:117] "RemoveContainer" containerID="89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a" Jan 29 13:53:12 crc kubenswrapper[4787]: E0129 13:53:12.064842 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a\": container with ID starting with 89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a not found: ID does not exist" containerID="89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a" Jan 29 13:53:12 crc kubenswrapper[4787]: I0129 13:53:12.064882 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a"} err="failed to get container status \"89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a\": rpc error: code = NotFound desc = could not find container \"89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a\": container with ID starting with 89d08a381292b1b7962acff2bd3e9231ee234f498e38615aee8b1b8f7bfa196a not found: ID does not exist" Jan 29 13:53:13 crc kubenswrapper[4787]: I0129 13:53:13.994041 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" path="/var/lib/kubelet/pods/c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4/volumes" Jan 29 13:53:24 crc kubenswrapper[4787]: I0129 13:53:24.986815 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:53:24 crc kubenswrapper[4787]: E0129 13:53:24.987600 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:53:39 crc kubenswrapper[4787]: I0129 13:53:39.986110 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:53:39 crc kubenswrapper[4787]: E0129 13:53:39.987170 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:53:50 crc kubenswrapper[4787]: I0129 13:53:50.985821 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:53:50 crc kubenswrapper[4787]: E0129 13:53:50.986758 4787 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:54:05 crc kubenswrapper[4787]: I0129 13:54:05.986649 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:54:05 crc kubenswrapper[4787]: E0129 13:54:05.987647 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:54:18 crc kubenswrapper[4787]: I0129 13:54:18.985196 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:54:18 crc kubenswrapper[4787]: E0129 13:54:18.985864 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:54:31 crc kubenswrapper[4787]: I0129 13:54:31.990737 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:54:31 crc kubenswrapper[4787]: E0129 13:54:31.992008 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:54:44 crc kubenswrapper[4787]: I0129 13:54:44.986109 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:54:44 crc kubenswrapper[4787]: E0129 13:54:44.987035 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:54:59 crc kubenswrapper[4787]: I0129 13:54:59.985836 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:54:59 crc kubenswrapper[4787]: E0129 13:54:59.986404 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:55:13 crc kubenswrapper[4787]: I0129 13:55:13.986198 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:55:13 crc kubenswrapper[4787]: E0129 13:55:13.986958 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:55:27 crc kubenswrapper[4787]: I0129 13:55:27.985890 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:55:27 crc kubenswrapper[4787]: E0129 13:55:27.986629 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:55:42 crc kubenswrapper[4787]: I0129 13:55:42.985296 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:55:42 crc kubenswrapper[4787]: E0129 13:55:42.986039 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:55:55 crc kubenswrapper[4787]: I0129 13:55:55.986839 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:55:55 crc kubenswrapper[4787]: E0129 13:55:55.987713 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:56:08 crc kubenswrapper[4787]: I0129 13:56:08.986264 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:56:08 crc kubenswrapper[4787]: E0129 13:56:08.987209 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:56:19 crc kubenswrapper[4787]: I0129 13:56:19.985426 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:56:19 crc kubenswrapper[4787]: E0129 13:56:19.986501 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:56:30 crc kubenswrapper[4787]: I0129 13:56:30.986340 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:56:30 crc kubenswrapper[4787]: E0129 13:56:30.988663 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:56:43 crc kubenswrapper[4787]: I0129 13:56:43.985967 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:56:43 crc kubenswrapper[4787]: E0129 13:56:43.986688 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:56:54 crc kubenswrapper[4787]: I0129 13:56:54.985117 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:56:54 crc kubenswrapper[4787]: E0129 13:56:54.985812 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:57:07 crc kubenswrapper[4787]: I0129 13:57:07.986317 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:57:07 crc kubenswrapper[4787]: E0129 13:57:07.987125 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:57:20 crc kubenswrapper[4787]: I0129 13:57:20.985938 4787 
scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:57:20 crc kubenswrapper[4787]: E0129 13:57:20.987082 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 13:57:35 crc kubenswrapper[4787]: I0129 13:57:35.986305 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 13:57:37 crc kubenswrapper[4787]: I0129 13:57:37.166382 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"8f4edcfb36ff75237f162443e5562df0609202866e3190ec1b7a155d6dc6ae2f"} Jan 29 13:59:58 crc kubenswrapper[4787]: I0129 13:59:58.394724 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 13:59:58 crc kubenswrapper[4787]: I0129 13:59:58.396809 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.155377 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr"] Jan 29 14:00:00 crc kubenswrapper[4787]: E0129 14:00:00.156074 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerName="registry-server" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.156090 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerName="registry-server" Jan 29 14:00:00 crc kubenswrapper[4787]: E0129 14:00:00.156104 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerName="extract-content" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.156112 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerName="extract-content" Jan 29 14:00:00 crc kubenswrapper[4787]: E0129 14:00:00.156128 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerName="extract-utilities" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.156134 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerName="extract-utilities" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.156272 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3b691ec-bbbd-4d9b-9bbe-2fc04074fcb4" containerName="registry-server" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.156731 4787 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.160140 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.160439 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.170283 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr"] Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.246192 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57b75\" (UniqueName: \"kubernetes.io/projected/cc0b8909-40fc-44a7-8323-7cca90901efa-kube-api-access-57b75\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.246272 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc0b8909-40fc-44a7-8323-7cca90901efa-config-volume\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.246352 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc0b8909-40fc-44a7-8323-7cca90901efa-secret-volume\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.348252 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57b75\" (UniqueName: \"kubernetes.io/projected/cc0b8909-40fc-44a7-8323-7cca90901efa-kube-api-access-57b75\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.348327 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc0b8909-40fc-44a7-8323-7cca90901efa-config-volume\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.348366 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc0b8909-40fc-44a7-8323-7cca90901efa-secret-volume\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.349419 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/cc0b8909-40fc-44a7-8323-7cca90901efa-config-volume\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.357900 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc0b8909-40fc-44a7-8323-7cca90901efa-secret-volume\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.364440 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57b75\" (UniqueName: \"kubernetes.io/projected/cc0b8909-40fc-44a7-8323-7cca90901efa-kube-api-access-57b75\") pod \"collect-profiles-29494920-q8kwr\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.480618 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:00 crc kubenswrapper[4787]: I0129 14:00:00.906801 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr"] Jan 29 14:00:01 crc kubenswrapper[4787]: I0129 14:00:01.489838 4787 generic.go:334] "Generic (PLEG): container finished" podID="cc0b8909-40fc-44a7-8323-7cca90901efa" containerID="4089083518a7fd0f0fcfe4ff92eecaeade4b631506203f47362bb4d129b88db2" exitCode=0 Jan 29 14:00:01 crc kubenswrapper[4787]: I0129 14:00:01.489879 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" event={"ID":"cc0b8909-40fc-44a7-8323-7cca90901efa","Type":"ContainerDied","Data":"4089083518a7fd0f0fcfe4ff92eecaeade4b631506203f47362bb4d129b88db2"} Jan 29 14:00:01 crc kubenswrapper[4787]: I0129 14:00:01.489906 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" event={"ID":"cc0b8909-40fc-44a7-8323-7cca90901efa","Type":"ContainerStarted","Data":"6bb58dc98505caabf7c1d8bfd9d17bafad91225421eed821c690fc39cec10eb4"} Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.784044 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.886676 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc0b8909-40fc-44a7-8323-7cca90901efa-secret-volume\") pod \"cc0b8909-40fc-44a7-8323-7cca90901efa\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.886807 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc0b8909-40fc-44a7-8323-7cca90901efa-config-volume\") pod \"cc0b8909-40fc-44a7-8323-7cca90901efa\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.886854 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57b75\" (UniqueName: \"kubernetes.io/projected/cc0b8909-40fc-44a7-8323-7cca90901efa-kube-api-access-57b75\") pod \"cc0b8909-40fc-44a7-8323-7cca90901efa\" (UID: \"cc0b8909-40fc-44a7-8323-7cca90901efa\") " Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.887549 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc0b8909-40fc-44a7-8323-7cca90901efa-config-volume" (OuterVolumeSpecName: "config-volume") pod "cc0b8909-40fc-44a7-8323-7cca90901efa" (UID: "cc0b8909-40fc-44a7-8323-7cca90901efa"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.893409 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0b8909-40fc-44a7-8323-7cca90901efa-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cc0b8909-40fc-44a7-8323-7cca90901efa" (UID: "cc0b8909-40fc-44a7-8323-7cca90901efa"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.893404 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc0b8909-40fc-44a7-8323-7cca90901efa-kube-api-access-57b75" (OuterVolumeSpecName: "kube-api-access-57b75") pod "cc0b8909-40fc-44a7-8323-7cca90901efa" (UID: "cc0b8909-40fc-44a7-8323-7cca90901efa"). InnerVolumeSpecName "kube-api-access-57b75". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.988308 4787 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cc0b8909-40fc-44a7-8323-7cca90901efa-config-volume\") on node \"crc\" DevicePath \"\"" Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.988343 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57b75\" (UniqueName: \"kubernetes.io/projected/cc0b8909-40fc-44a7-8323-7cca90901efa-kube-api-access-57b75\") on node \"crc\" DevicePath \"\"" Jan 29 14:00:02 crc kubenswrapper[4787]: I0129 14:00:02.988358 4787 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cc0b8909-40fc-44a7-8323-7cca90901efa-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 29 14:00:03 crc kubenswrapper[4787]: I0129 14:00:03.503839 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" event={"ID":"cc0b8909-40fc-44a7-8323-7cca90901efa","Type":"ContainerDied","Data":"6bb58dc98505caabf7c1d8bfd9d17bafad91225421eed821c690fc39cec10eb4"} Jan 29 14:00:03 crc kubenswrapper[4787]: I0129 14:00:03.503881 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6bb58dc98505caabf7c1d8bfd9d17bafad91225421eed821c690fc39cec10eb4" Jan 29 14:00:03 crc kubenswrapper[4787]: I0129 14:00:03.503890 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr" Jan 29 14:00:03 crc kubenswrapper[4787]: I0129 14:00:03.872833 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k"] Jan 29 14:00:03 crc kubenswrapper[4787]: I0129 14:00:03.885198 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494875-ln42k"] Jan 29 14:00:03 crc kubenswrapper[4787]: I0129 14:00:03.996329 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a190e8a-8a77-4af5-84ae-83976f1f88d1" path="/var/lib/kubelet/pods/8a190e8a-8a77-4af5-84ae-83976f1f88d1/volumes" Jan 29 14:00:24 crc kubenswrapper[4787]: I0129 14:00:24.012850 4787 scope.go:117] "RemoveContainer" containerID="a5bc2bf57f2925912ac6ffb7508963173f28a9303b0c91e717294a992b8436ca" Jan 29 14:00:27 crc kubenswrapper[4787]: I0129 14:00:27.959730 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xl9rg"] Jan 29 14:00:27 crc kubenswrapper[4787]: E0129 14:00:27.960889 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc0b8909-40fc-44a7-8323-7cca90901efa" containerName="collect-profiles" Jan 29 14:00:27 crc kubenswrapper[4787]: I0129 14:00:27.960917 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc0b8909-40fc-44a7-8323-7cca90901efa" containerName="collect-profiles" Jan 29 14:00:27 crc kubenswrapper[4787]: I0129 14:00:27.961169 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc0b8909-40fc-44a7-8323-7cca90901efa" containerName="collect-profiles" Jan 29 14:00:27 crc kubenswrapper[4787]: I0129 14:00:27.963000 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.049332 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xl9rg"] Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.115151 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-utilities\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.115213 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmpgn\" (UniqueName: \"kubernetes.io/projected/5fdf82dc-cc82-4924-9366-a3c2a5666818-kube-api-access-zmpgn\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.115248 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-catalog-content\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.216642 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmpgn\" (UniqueName: \"kubernetes.io/projected/5fdf82dc-cc82-4924-9366-a3c2a5666818-kube-api-access-zmpgn\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.216732 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-catalog-content\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.216816 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-utilities\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.217348 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-catalog-content\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.217411 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-utilities\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.239380 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zmpgn\" (UniqueName: \"kubernetes.io/projected/5fdf82dc-cc82-4924-9366-a3c2a5666818-kube-api-access-zmpgn\") pod \"certified-operators-xl9rg\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.343263 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.394517 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.394874 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:00:28 crc kubenswrapper[4787]: I0129 14:00:28.852735 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xl9rg"] Jan 29 14:00:29 crc kubenswrapper[4787]: I0129 14:00:29.744301 4787 generic.go:334] "Generic (PLEG): container finished" podID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerID="da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3" exitCode=0 Jan 29 14:00:29 crc kubenswrapper[4787]: I0129 14:00:29.744405 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl9rg" event={"ID":"5fdf82dc-cc82-4924-9366-a3c2a5666818","Type":"ContainerDied","Data":"da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3"} Jan 29 14:00:29 crc kubenswrapper[4787]: I0129 14:00:29.744883 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl9rg" event={"ID":"5fdf82dc-cc82-4924-9366-a3c2a5666818","Type":"ContainerStarted","Data":"cacdc401fc460be31183f322fb54fa9d2f5d1246d974996664efb88909bc78f2"} Jan 29 14:00:29 crc kubenswrapper[4787]: I0129 14:00:29.749084 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 14:00:30 crc kubenswrapper[4787]: I0129 14:00:30.755214 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl9rg" event={"ID":"5fdf82dc-cc82-4924-9366-a3c2a5666818","Type":"ContainerStarted","Data":"8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964"} Jan 29 14:00:30 crc kubenswrapper[4787]: E0129 14:00:30.871085 4787 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fdf82dc_cc82_4924_9366_a3c2a5666818.slice/crio-8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964.scope\": RecentStats: unable to find data in memory cache]" Jan 29 14:00:31 crc kubenswrapper[4787]: I0129 14:00:31.768644 4787 generic.go:334] "Generic (PLEG): container finished" podID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerID="8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964" exitCode=0 Jan 29 14:00:31 crc kubenswrapper[4787]: I0129 14:00:31.768768 4787 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl9rg" event={"ID":"5fdf82dc-cc82-4924-9366-a3c2a5666818","Type":"ContainerDied","Data":"8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964"} Jan 29 14:00:32 crc kubenswrapper[4787]: I0129 14:00:32.778240 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl9rg" event={"ID":"5fdf82dc-cc82-4924-9366-a3c2a5666818","Type":"ContainerStarted","Data":"5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a"} Jan 29 14:00:32 crc kubenswrapper[4787]: I0129 14:00:32.799856 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xl9rg" podStartSLOduration=3.161736674 podStartE2EDuration="5.799822864s" podCreationTimestamp="2026-01-29 14:00:27 +0000 UTC" firstStartedPulling="2026-01-29 14:00:29.748205787 +0000 UTC m=+2668.509466103" lastFinishedPulling="2026-01-29 14:00:32.386291977 +0000 UTC m=+2671.147552293" observedRunningTime="2026-01-29 14:00:32.792084084 +0000 UTC m=+2671.553344370" watchObservedRunningTime="2026-01-29 14:00:32.799822864 +0000 UTC m=+2671.561083200" Jan 29 14:00:38 crc kubenswrapper[4787]: I0129 14:00:38.343922 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:38 crc kubenswrapper[4787]: I0129 14:00:38.344572 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:38 crc kubenswrapper[4787]: I0129 14:00:38.437557 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:38 crc kubenswrapper[4787]: I0129 14:00:38.885553 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:38 crc kubenswrapper[4787]: I0129 14:00:38.939681 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xl9rg"] Jan 29 14:00:40 crc kubenswrapper[4787]: I0129 14:00:40.849924 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xl9rg" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerName="registry-server" containerID="cri-o://5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a" gracePeriod=2 Jan 29 14:00:41 crc kubenswrapper[4787]: E0129 14:00:41.069372 4787 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fdf82dc_cc82_4924_9366_a3c2a5666818.slice/crio-conmon-5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a.scope\": RecentStats: unable to find data in memory cache]" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.266897 4787 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.266897 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.427568 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-utilities\") pod \"5fdf82dc-cc82-4924-9366-a3c2a5666818\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.427614 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-catalog-content\") pod \"5fdf82dc-cc82-4924-9366-a3c2a5666818\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.427655 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmpgn\" (UniqueName: \"kubernetes.io/projected/5fdf82dc-cc82-4924-9366-a3c2a5666818-kube-api-access-zmpgn\") pod \"5fdf82dc-cc82-4924-9366-a3c2a5666818\" (UID: \"5fdf82dc-cc82-4924-9366-a3c2a5666818\") " Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.429843 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-utilities" (OuterVolumeSpecName: "utilities") pod "5fdf82dc-cc82-4924-9366-a3c2a5666818" (UID: "5fdf82dc-cc82-4924-9366-a3c2a5666818"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.437549 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fdf82dc-cc82-4924-9366-a3c2a5666818-kube-api-access-zmpgn" (OuterVolumeSpecName: "kube-api-access-zmpgn") pod "5fdf82dc-cc82-4924-9366-a3c2a5666818" (UID: "5fdf82dc-cc82-4924-9366-a3c2a5666818"). InnerVolumeSpecName "kube-api-access-zmpgn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.504132 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5fdf82dc-cc82-4924-9366-a3c2a5666818" (UID: "5fdf82dc-cc82-4924-9366-a3c2a5666818"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.529920 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmpgn\" (UniqueName: \"kubernetes.io/projected/5fdf82dc-cc82-4924-9366-a3c2a5666818-kube-api-access-zmpgn\") on node \"crc\" DevicePath \"\"" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.529972 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.529997 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fdf82dc-cc82-4924-9366-a3c2a5666818-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.868217 4787 generic.go:334] "Generic (PLEG): container finished" podID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerID="5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a" exitCode=0 Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.868724 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl9rg" event={"ID":"5fdf82dc-cc82-4924-9366-a3c2a5666818","Type":"ContainerDied","Data":"5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a"} Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.868778 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xl9rg" event={"ID":"5fdf82dc-cc82-4924-9366-a3c2a5666818","Type":"ContainerDied","Data":"cacdc401fc460be31183f322fb54fa9d2f5d1246d974996664efb88909bc78f2"} Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.868840 4787 scope.go:117] "RemoveContainer" containerID="5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.869665 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xl9rg" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.913008 4787 scope.go:117] "RemoveContainer" containerID="8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.949668 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xl9rg"] Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.962135 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xl9rg"] Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.965875 4787 scope.go:117] "RemoveContainer" containerID="da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.982005 4787 scope.go:117] "RemoveContainer" containerID="5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a" Jan 29 14:00:41 crc kubenswrapper[4787]: E0129 14:00:41.982428 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a\": container with ID starting with 5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a not found: ID does not exist" containerID="5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.982495 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a"} err="failed to get container status \"5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a\": rpc error: code = NotFound desc = could not find container \"5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a\": container with ID starting with 5d4c164e0a4a3bbf20f770d0c330db797f963f16ac6c1d13ad04166db5b3f07a not found: ID does not exist" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.982528 4787 scope.go:117] "RemoveContainer" containerID="8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964" Jan 29 14:00:41 crc kubenswrapper[4787]: E0129 14:00:41.982888 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964\": container with ID starting with 8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964 not found: ID does not exist" containerID="8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.983035 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964"} err="failed to get container status \"8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964\": rpc error: code = NotFound desc = could not find container \"8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964\": container with ID starting with 8985869db8c546c92ed3a88df9d58314d6692b667e35c261a8fd254505a3a964 not found: ID does not exist" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.983165 4787 scope.go:117] "RemoveContainer" containerID="da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3" Jan 29 14:00:41 crc kubenswrapper[4787]: E0129 14:00:41.983620 4787 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3\": container with ID starting with da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3 not found: ID does not exist" containerID="da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.983667 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3"} err="failed to get container status \"da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3\": rpc error: code = NotFound desc = could not find container \"da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3\": container with ID starting with da030d7c3fb74835ab088e532513fcc96b6dfb3c73c13147d2661b7ce47947c3 not found: ID does not exist" Jan 29 14:00:41 crc kubenswrapper[4787]: I0129 14:00:41.998495 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" path="/var/lib/kubelet/pods/5fdf82dc-cc82-4924-9366-a3c2a5666818/volumes" Jan 29 14:00:58 crc kubenswrapper[4787]: I0129 14:00:58.395148 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:00:58 crc kubenswrapper[4787]: I0129 14:00:58.395827 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:00:58 crc kubenswrapper[4787]: I0129 14:00:58.396289 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 14:00:58 crc kubenswrapper[4787]: I0129 14:00:58.398024 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8f4edcfb36ff75237f162443e5562df0609202866e3190ec1b7a155d6dc6ae2f"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 14:00:58 crc kubenswrapper[4787]: I0129 14:00:58.398142 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://8f4edcfb36ff75237f162443e5562df0609202866e3190ec1b7a155d6dc6ae2f" gracePeriod=600 Jan 29 14:00:59 crc kubenswrapper[4787]: I0129 14:00:59.030195 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="8f4edcfb36ff75237f162443e5562df0609202866e3190ec1b7a155d6dc6ae2f" exitCode=0 Jan 29 14:00:59 crc kubenswrapper[4787]: I0129 14:00:59.030256 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" 
event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"8f4edcfb36ff75237f162443e5562df0609202866e3190ec1b7a155d6dc6ae2f"} Jan 29 14:00:59 crc kubenswrapper[4787]: I0129 14:00:59.030567 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377"} Jan 29 14:00:59 crc kubenswrapper[4787]: I0129 14:00:59.030592 4787 scope.go:117] "RemoveContainer" containerID="55158fd789e8ebe852a637c847a95a56a744479c2bce1d88cb4821d5cfb1cd57" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.542609 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jkw94"] Jan 29 14:01:15 crc kubenswrapper[4787]: E0129 14:01:15.543891 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerName="extract-utilities" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.543917 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerName="extract-utilities" Jan 29 14:01:15 crc kubenswrapper[4787]: E0129 14:01:15.543956 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerName="registry-server" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.543968 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerName="registry-server" Jan 29 14:01:15 crc kubenswrapper[4787]: E0129 14:01:15.543993 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerName="extract-content" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.544007 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerName="extract-content" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.544266 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fdf82dc-cc82-4924-9366-a3c2a5666818" containerName="registry-server" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.546438 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.560912 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jkw94"] Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.662079 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-utilities\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.662515 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dn92\" (UniqueName: \"kubernetes.io/projected/10b1b07a-4e2b-4156-b096-afe0abba798c-kube-api-access-9dn92\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.662564 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-catalog-content\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.763603 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dn92\" (UniqueName: \"kubernetes.io/projected/10b1b07a-4e2b-4156-b096-afe0abba798c-kube-api-access-9dn92\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.763669 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-catalog-content\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.763744 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-utilities\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.764314 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-utilities\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.764344 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-catalog-content\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.782320 4787 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-9dn92\" (UniqueName: \"kubernetes.io/projected/10b1b07a-4e2b-4156-b096-afe0abba798c-kube-api-access-9dn92\") pod \"community-operators-jkw94\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:15 crc kubenswrapper[4787]: I0129 14:01:15.913136 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:16 crc kubenswrapper[4787]: I0129 14:01:16.406791 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jkw94"] Jan 29 14:01:17 crc kubenswrapper[4787]: I0129 14:01:17.196294 4787 generic.go:334] "Generic (PLEG): container finished" podID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerID="6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d" exitCode=0 Jan 29 14:01:17 crc kubenswrapper[4787]: I0129 14:01:17.196654 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkw94" event={"ID":"10b1b07a-4e2b-4156-b096-afe0abba798c","Type":"ContainerDied","Data":"6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d"} Jan 29 14:01:17 crc kubenswrapper[4787]: I0129 14:01:17.196686 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkw94" event={"ID":"10b1b07a-4e2b-4156-b096-afe0abba798c","Type":"ContainerStarted","Data":"1a288e450d1843f074434d506725cec261a6bac3fc94435bf830275f34413a0a"} Jan 29 14:01:19 crc kubenswrapper[4787]: I0129 14:01:19.221259 4787 generic.go:334] "Generic (PLEG): container finished" podID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerID="479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201" exitCode=0 Jan 29 14:01:19 crc kubenswrapper[4787]: I0129 14:01:19.221355 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkw94" event={"ID":"10b1b07a-4e2b-4156-b096-afe0abba798c","Type":"ContainerDied","Data":"479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201"} Jan 29 14:01:20 crc kubenswrapper[4787]: I0129 14:01:20.234083 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkw94" event={"ID":"10b1b07a-4e2b-4156-b096-afe0abba798c","Type":"ContainerStarted","Data":"9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314"} Jan 29 14:01:20 crc kubenswrapper[4787]: I0129 14:01:20.256494 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jkw94" podStartSLOduration=2.815584014 podStartE2EDuration="5.256476368s" podCreationTimestamp="2026-01-29 14:01:15 +0000 UTC" firstStartedPulling="2026-01-29 14:01:17.198167741 +0000 UTC m=+2715.959428017" lastFinishedPulling="2026-01-29 14:01:19.639060085 +0000 UTC m=+2718.400320371" observedRunningTime="2026-01-29 14:01:20.251537558 +0000 UTC m=+2719.012797894" watchObservedRunningTime="2026-01-29 14:01:20.256476368 +0000 UTC m=+2719.017736644" Jan 29 14:01:25 crc kubenswrapper[4787]: I0129 14:01:25.914166 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:25 crc kubenswrapper[4787]: I0129 14:01:25.914768 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:25 crc kubenswrapper[4787]: I0129 14:01:25.981733 4787 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:26 crc kubenswrapper[4787]: I0129 14:01:26.319919 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:26 crc kubenswrapper[4787]: I0129 14:01:26.378699 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jkw94"] Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.296103 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jkw94" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerName="registry-server" containerID="cri-o://9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314" gracePeriod=2 Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.722973 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.891969 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dn92\" (UniqueName: \"kubernetes.io/projected/10b1b07a-4e2b-4156-b096-afe0abba798c-kube-api-access-9dn92\") pod \"10b1b07a-4e2b-4156-b096-afe0abba798c\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.892077 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-catalog-content\") pod \"10b1b07a-4e2b-4156-b096-afe0abba798c\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.892112 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-utilities\") pod \"10b1b07a-4e2b-4156-b096-afe0abba798c\" (UID: \"10b1b07a-4e2b-4156-b096-afe0abba798c\") " Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.893222 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-utilities" (OuterVolumeSpecName: "utilities") pod "10b1b07a-4e2b-4156-b096-afe0abba798c" (UID: "10b1b07a-4e2b-4156-b096-afe0abba798c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.897446 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10b1b07a-4e2b-4156-b096-afe0abba798c-kube-api-access-9dn92" (OuterVolumeSpecName: "kube-api-access-9dn92") pod "10b1b07a-4e2b-4156-b096-afe0abba798c" (UID: "10b1b07a-4e2b-4156-b096-afe0abba798c"). InnerVolumeSpecName "kube-api-access-9dn92". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.995670 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dn92\" (UniqueName: \"kubernetes.io/projected/10b1b07a-4e2b-4156-b096-afe0abba798c-kube-api-access-9dn92\") on node \"crc\" DevicePath \"\"" Jan 29 14:01:28 crc kubenswrapper[4787]: I0129 14:01:28.995715 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.084774 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "10b1b07a-4e2b-4156-b096-afe0abba798c" (UID: "10b1b07a-4e2b-4156-b096-afe0abba798c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.096762 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10b1b07a-4e2b-4156-b096-afe0abba798c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.307633 4787 generic.go:334] "Generic (PLEG): container finished" podID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerID="9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314" exitCode=0 Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.307674 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkw94" event={"ID":"10b1b07a-4e2b-4156-b096-afe0abba798c","Type":"ContainerDied","Data":"9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314"} Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.307700 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jkw94" event={"ID":"10b1b07a-4e2b-4156-b096-afe0abba798c","Type":"ContainerDied","Data":"1a288e450d1843f074434d506725cec261a6bac3fc94435bf830275f34413a0a"} Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.307716 4787 scope.go:117] "RemoveContainer" containerID="9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.308294 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jkw94" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.341947 4787 scope.go:117] "RemoveContainer" containerID="479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.343246 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jkw94"] Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.361836 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jkw94"] Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.373260 4787 scope.go:117] "RemoveContainer" containerID="6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.409798 4787 scope.go:117] "RemoveContainer" containerID="9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314" Jan 29 14:01:29 crc kubenswrapper[4787]: E0129 14:01:29.410301 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314\": container with ID starting with 9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314 not found: ID does not exist" containerID="9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.410347 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314"} err="failed to get container status \"9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314\": rpc error: code = NotFound desc = could not find container \"9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314\": container with ID starting with 9e2c1cf43c24c63255f587b43cce1b0f50cb9edc8d8347d9462082025ada1314 not found: ID does not exist" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.410380 4787 scope.go:117] "RemoveContainer" containerID="479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201" Jan 29 14:01:29 crc kubenswrapper[4787]: E0129 14:01:29.410895 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201\": container with ID starting with 479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201 not found: ID does not exist" containerID="479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.410934 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201"} err="failed to get container status \"479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201\": rpc error: code = NotFound desc = could not find container \"479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201\": container with ID starting with 479cf021516ce9c25a473d29a56f5b30e2d7e0268fad83fcb660d1a9617fc201 not found: ID does not exist" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.410956 4787 scope.go:117] "RemoveContainer" containerID="6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d" Jan 29 14:01:29 crc kubenswrapper[4787]: E0129 14:01:29.411248 4787 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d\": container with ID starting with 6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d not found: ID does not exist" containerID="6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.411280 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d"} err="failed to get container status \"6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d\": rpc error: code = NotFound desc = could not find container \"6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d\": container with ID starting with 6512657cc89e64168f56b5ebd9acd2264f9753b2c73789aca9a36aca34a08d3d not found: ID does not exist" Jan 29 14:01:29 crc kubenswrapper[4787]: I0129 14:01:29.998783 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" path="/var/lib/kubelet/pods/10b1b07a-4e2b-4156-b096-afe0abba798c/volumes" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.233576 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tbnf4"] Jan 29 14:01:47 crc kubenswrapper[4787]: E0129 14:01:47.234805 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerName="extract-utilities" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.234829 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerName="extract-utilities" Jan 29 14:01:47 crc kubenswrapper[4787]: E0129 14:01:47.234851 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerName="registry-server" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.234864 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerName="registry-server" Jan 29 14:01:47 crc kubenswrapper[4787]: E0129 14:01:47.234900 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerName="extract-content" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.234913 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerName="extract-content" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.235190 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="10b1b07a-4e2b-4156-b096-afe0abba798c" containerName="registry-server" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.237399 4787 util.go:30] "No sandbox for pod can be found. 
Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.237399 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.253194 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-catalog-content\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.253500 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpcsb\" (UniqueName: \"kubernetes.io/projected/07abb017-c0ca-468f-98fb-397ad5fd7b69-kube-api-access-zpcsb\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.253683 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-utilities\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.256700 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tbnf4"] Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.354852 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpcsb\" (UniqueName: \"kubernetes.io/projected/07abb017-c0ca-468f-98fb-397ad5fd7b69-kube-api-access-zpcsb\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.354905 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-utilities\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.354973 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-catalog-content\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.355542 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-catalog-content\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.355556 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-utilities\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.386286 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-zpcsb\" (UniqueName: \"kubernetes.io/projected/07abb017-c0ca-468f-98fb-397ad5fd7b69-kube-api-access-zpcsb\") pod \"redhat-operators-tbnf4\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:47 crc kubenswrapper[4787]: I0129 14:01:47.567363 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:48 crc kubenswrapper[4787]: I0129 14:01:48.005144 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tbnf4"] Jan 29 14:01:48 crc kubenswrapper[4787]: I0129 14:01:48.516365 4787 generic.go:334] "Generic (PLEG): container finished" podID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerID="a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47" exitCode=0 Jan 29 14:01:48 crc kubenswrapper[4787]: I0129 14:01:48.516414 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbnf4" event={"ID":"07abb017-c0ca-468f-98fb-397ad5fd7b69","Type":"ContainerDied","Data":"a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47"} Jan 29 14:01:48 crc kubenswrapper[4787]: I0129 14:01:48.516742 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbnf4" event={"ID":"07abb017-c0ca-468f-98fb-397ad5fd7b69","Type":"ContainerStarted","Data":"e2d3c94b52ac1a5cabcab7cbb6fb7c3a7d63bf291b68ab5e241b5ba87dbc7a07"} Jan 29 14:01:49 crc kubenswrapper[4787]: I0129 14:01:49.526410 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbnf4" event={"ID":"07abb017-c0ca-468f-98fb-397ad5fd7b69","Type":"ContainerStarted","Data":"5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262"} Jan 29 14:01:50 crc kubenswrapper[4787]: I0129 14:01:50.538685 4787 generic.go:334] "Generic (PLEG): container finished" podID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerID="5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262" exitCode=0 Jan 29 14:01:50 crc kubenswrapper[4787]: I0129 14:01:50.538765 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbnf4" event={"ID":"07abb017-c0ca-468f-98fb-397ad5fd7b69","Type":"ContainerDied","Data":"5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262"} Jan 29 14:01:51 crc kubenswrapper[4787]: I0129 14:01:51.559142 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbnf4" event={"ID":"07abb017-c0ca-468f-98fb-397ad5fd7b69","Type":"ContainerStarted","Data":"d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a"} Jan 29 14:01:51 crc kubenswrapper[4787]: I0129 14:01:51.582067 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tbnf4" podStartSLOduration=2.00958884 podStartE2EDuration="4.582039514s" podCreationTimestamp="2026-01-29 14:01:47 +0000 UTC" firstStartedPulling="2026-01-29 14:01:48.517931622 +0000 UTC m=+2747.279191898" lastFinishedPulling="2026-01-29 14:01:51.090382256 +0000 UTC m=+2749.851642572" observedRunningTime="2026-01-29 14:01:51.579856082 +0000 UTC m=+2750.341116398" watchObservedRunningTime="2026-01-29 14:01:51.582039514 +0000 UTC m=+2750.343299820" Jan 29 14:01:57 crc kubenswrapper[4787]: I0129 14:01:57.567834 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tbnf4" Jan 
29 14:01:57 crc kubenswrapper[4787]: I0129 14:01:57.568252 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:01:58 crc kubenswrapper[4787]: I0129 14:01:58.612329 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tbnf4" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="registry-server" probeResult="failure" output=< Jan 29 14:01:58 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s Jan 29 14:01:58 crc kubenswrapper[4787]: > Jan 29 14:02:07 crc kubenswrapper[4787]: I0129 14:02:07.639240 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:02:07 crc kubenswrapper[4787]: I0129 14:02:07.717661 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:02:07 crc kubenswrapper[4787]: I0129 14:02:07.887623 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tbnf4"] Jan 29 14:02:08 crc kubenswrapper[4787]: I0129 14:02:08.691683 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tbnf4" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="registry-server" containerID="cri-o://d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a" gracePeriod=2 Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.107397 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.212562 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-catalog-content\") pod \"07abb017-c0ca-468f-98fb-397ad5fd7b69\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.212653 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-utilities\") pod \"07abb017-c0ca-468f-98fb-397ad5fd7b69\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") " Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.212713 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpcsb\" (UniqueName: \"kubernetes.io/projected/07abb017-c0ca-468f-98fb-397ad5fd7b69-kube-api-access-zpcsb\") pod \"07abb017-c0ca-468f-98fb-397ad5fd7b69\" (UID: \"07abb017-c0ca-468f-98fb-397ad5fd7b69\") "
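
The startup-probe failure above ('timeout: failed to connect service ":50051" within 1s') means the registry-server's gRPC port was not yet accepting connections; nine seconds later the same probe reports started. A minimal sketch of such a check as plain TCP reachability with the same one-second budget; the address is an assumption (the real probe runs against the pod's own network namespace and also issues a gRPC health RPC, omitted here):

```go
// TCP reachability check in the shape of the startup probe above.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	addr := "127.0.0.1:50051" // assumed pod-local address for the sketch
	conn, err := net.DialTimeout("tcp", addr, time.Second)
	if err != nil {
		// Mirrors the probe output seen above.
		fmt.Printf("timeout: failed to connect service %q within 1s (%v)\n", ":50051", err)
		return
	}
	conn.Close()
	fmt.Println("service is reachable")
}
```
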
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.224317 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07abb017-c0ca-468f-98fb-397ad5fd7b69-kube-api-access-zpcsb" (OuterVolumeSpecName: "kube-api-access-zpcsb") pod "07abb017-c0ca-468f-98fb-397ad5fd7b69" (UID: "07abb017-c0ca-468f-98fb-397ad5fd7b69"). InnerVolumeSpecName "kube-api-access-zpcsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.314534 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.314587 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpcsb\" (UniqueName: \"kubernetes.io/projected/07abb017-c0ca-468f-98fb-397ad5fd7b69-kube-api-access-zpcsb\") on node \"crc\" DevicePath \"\"" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.400562 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07abb017-c0ca-468f-98fb-397ad5fd7b69" (UID: "07abb017-c0ca-468f-98fb-397ad5fd7b69"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.416662 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07abb017-c0ca-468f-98fb-397ad5fd7b69-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.704029 4787 generic.go:334] "Generic (PLEG): container finished" podID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerID="d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a" exitCode=0 Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.704102 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbnf4" event={"ID":"07abb017-c0ca-468f-98fb-397ad5fd7b69","Type":"ContainerDied","Data":"d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a"} Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.704137 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tbnf4" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.704157 4787 scope.go:117] "RemoveContainer" containerID="d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.704144 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tbnf4" event={"ID":"07abb017-c0ca-468f-98fb-397ad5fd7b69","Type":"ContainerDied","Data":"e2d3c94b52ac1a5cabcab7cbb6fb7c3a7d63bf291b68ab5e241b5ba87dbc7a07"} Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.763440 4787 scope.go:117] "RemoveContainer" containerID="5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.774188 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tbnf4"] Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.782677 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tbnf4"] Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.801248 4787 scope.go:117] "RemoveContainer" containerID="a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.832449 4787 scope.go:117] "RemoveContainer" containerID="d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a" Jan 29 14:02:09 crc kubenswrapper[4787]: E0129 14:02:09.832961 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a\": container with ID starting with d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a not found: ID does not exist" containerID="d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.833135 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a"} err="failed to get container status \"d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a\": rpc error: code = NotFound desc = could not find container \"d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a\": container with ID starting with d00322747a2e221b64e8491cd5e6f42eeef79c90234792a017addd9d6da0e19a not found: ID does not exist" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.833235 4787 scope.go:117] "RemoveContainer" containerID="5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262" Jan 29 14:02:09 crc kubenswrapper[4787]: E0129 14:02:09.833815 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262\": container with ID starting with 5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262 not found: ID does not exist" containerID="5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.833887 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262"} err="failed to get container status \"5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262\": rpc error: code = NotFound desc = could not find container 
\"5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262\": container with ID starting with 5d12983f02db86c54cc95e2a136df060e1bc771c08b74feac8c0417e8feb1262 not found: ID does not exist" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.833927 4787 scope.go:117] "RemoveContainer" containerID="a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47" Jan 29 14:02:09 crc kubenswrapper[4787]: E0129 14:02:09.834405 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47\": container with ID starting with a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47 not found: ID does not exist" containerID="a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47" Jan 29 14:02:09 crc kubenswrapper[4787]: I0129 14:02:09.834517 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47"} err="failed to get container status \"a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47\": rpc error: code = NotFound desc = could not find container \"a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47\": container with ID starting with a278926cc0c2c83baac3b58171c6c401d6d2adc86d7ac4f1e31636c78d9f4f47 not found: ID does not exist" Jan 29 14:02:10 crc kubenswrapper[4787]: I0129 14:02:10.002507 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" path="/var/lib/kubelet/pods/07abb017-c0ca-468f-98fb-397ad5fd7b69/volumes" Jan 29 14:02:58 crc kubenswrapper[4787]: I0129 14:02:58.394808 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:02:58 crc kubenswrapper[4787]: I0129 14:02:58.395599 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.158555 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j49vw"] Jan 29 14:03:16 crc kubenswrapper[4787]: E0129 14:03:16.159867 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="extract-content" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.159891 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="extract-content" Jan 29 14:03:16 crc kubenswrapper[4787]: E0129 14:03:16.159914 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="registry-server" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.159927 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="registry-server" Jan 29 14:03:16 crc kubenswrapper[4787]: E0129 14:03:16.159952 4787 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="extract-utilities" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.159966 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="extract-utilities" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.160255 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="07abb017-c0ca-468f-98fb-397ad5fd7b69" containerName="registry-server" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.162199 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.180399 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j49vw"] Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.297667 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-catalog-content\") pod \"redhat-marketplace-j49vw\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.297761 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8tvl\" (UniqueName: \"kubernetes.io/projected/ddcb5699-4011-4d45-b90b-6ef1fc01106b-kube-api-access-v8tvl\") pod \"redhat-marketplace-j49vw\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.297807 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-utilities\") pod \"redhat-marketplace-j49vw\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.399364 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8tvl\" (UniqueName: \"kubernetes.io/projected/ddcb5699-4011-4d45-b90b-6ef1fc01106b-kube-api-access-v8tvl\") pod \"redhat-marketplace-j49vw\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.399426 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-utilities\") pod \"redhat-marketplace-j49vw\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.399511 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-catalog-content\") pod \"redhat-marketplace-j49vw\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.400104 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-catalog-content\") pod \"redhat-marketplace-j49vw\" 
(UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.401361 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-utilities\") pod \"redhat-marketplace-j49vw\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.418511 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8tvl\" (UniqueName: \"kubernetes.io/projected/ddcb5699-4011-4d45-b90b-6ef1fc01106b-kube-api-access-v8tvl\") pod \"redhat-marketplace-j49vw\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.492571 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:16 crc kubenswrapper[4787]: I0129 14:03:16.942743 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j49vw"] Jan 29 14:03:17 crc kubenswrapper[4787]: I0129 14:03:17.314407 4787 generic.go:334] "Generic (PLEG): container finished" podID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerID="64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c" exitCode=0 Jan 29 14:03:17 crc kubenswrapper[4787]: I0129 14:03:17.314527 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j49vw" event={"ID":"ddcb5699-4011-4d45-b90b-6ef1fc01106b","Type":"ContainerDied","Data":"64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c"} Jan 29 14:03:17 crc kubenswrapper[4787]: I0129 14:03:17.314776 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j49vw" event={"ID":"ddcb5699-4011-4d45-b90b-6ef1fc01106b","Type":"ContainerStarted","Data":"e969764544b1bf0cc0df86239ce65286202a361cd2850d3255b09729f8bad1dc"} Jan 29 14:03:18 crc kubenswrapper[4787]: I0129 14:03:18.324447 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j49vw" event={"ID":"ddcb5699-4011-4d45-b90b-6ef1fc01106b","Type":"ContainerStarted","Data":"0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764"} Jan 29 14:03:19 crc kubenswrapper[4787]: I0129 14:03:19.338226 4787 generic.go:334] "Generic (PLEG): container finished" podID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerID="0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764" exitCode=0 Jan 29 14:03:19 crc kubenswrapper[4787]: I0129 14:03:19.338889 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j49vw" event={"ID":"ddcb5699-4011-4d45-b90b-6ef1fc01106b","Type":"ContainerDied","Data":"0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764"} Jan 29 14:03:20 crc kubenswrapper[4787]: I0129 14:03:20.349089 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j49vw" event={"ID":"ddcb5699-4011-4d45-b90b-6ef1fc01106b","Type":"ContainerStarted","Data":"1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8"} Jan 29 14:03:20 crc kubenswrapper[4787]: I0129 14:03:20.388941 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j49vw" 
podStartSLOduration=1.93467748 podStartE2EDuration="4.388919824s" podCreationTimestamp="2026-01-29 14:03:16 +0000 UTC" firstStartedPulling="2026-01-29 14:03:17.318095131 +0000 UTC m=+2836.079355397" lastFinishedPulling="2026-01-29 14:03:19.772337465 +0000 UTC m=+2838.533597741" observedRunningTime="2026-01-29 14:03:20.38314859 +0000 UTC m=+2839.144408906" watchObservedRunningTime="2026-01-29 14:03:20.388919824 +0000 UTC m=+2839.150180110" Jan 29 14:03:26 crc kubenswrapper[4787]: I0129 14:03:26.493571 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:26 crc kubenswrapper[4787]: I0129 14:03:26.494167 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:26 crc kubenswrapper[4787]: I0129 14:03:26.565832 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:27 crc kubenswrapper[4787]: I0129 14:03:27.479838 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:27 crc kubenswrapper[4787]: I0129 14:03:27.530215 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j49vw"] Jan 29 14:03:28 crc kubenswrapper[4787]: I0129 14:03:28.395024 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:03:28 crc kubenswrapper[4787]: I0129 14:03:28.395560 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:03:29 crc kubenswrapper[4787]: I0129 14:03:29.432854 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j49vw" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="registry-server" containerID="cri-o://1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8" gracePeriod=2 Jan 29 14:03:29 crc kubenswrapper[4787]: I0129 14:03:29.877629 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.018460 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-utilities\") pod \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.018594 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-catalog-content\") pod \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.018794 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8tvl\" (UniqueName: \"kubernetes.io/projected/ddcb5699-4011-4d45-b90b-6ef1fc01106b-kube-api-access-v8tvl\") pod \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\" (UID: \"ddcb5699-4011-4d45-b90b-6ef1fc01106b\") " Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.020553 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-utilities" (OuterVolumeSpecName: "utilities") pod "ddcb5699-4011-4d45-b90b-6ef1fc01106b" (UID: "ddcb5699-4011-4d45-b90b-6ef1fc01106b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.033845 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddcb5699-4011-4d45-b90b-6ef1fc01106b-kube-api-access-v8tvl" (OuterVolumeSpecName: "kube-api-access-v8tvl") pod "ddcb5699-4011-4d45-b90b-6ef1fc01106b" (UID: "ddcb5699-4011-4d45-b90b-6ef1fc01106b"). InnerVolumeSpecName "kube-api-access-v8tvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.059054 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ddcb5699-4011-4d45-b90b-6ef1fc01106b" (UID: "ddcb5699-4011-4d45-b90b-6ef1fc01106b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.121712 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.121774 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddcb5699-4011-4d45-b90b-6ef1fc01106b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.121796 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8tvl\" (UniqueName: \"kubernetes.io/projected/ddcb5699-4011-4d45-b90b-6ef1fc01106b-kube-api-access-v8tvl\") on node \"crc\" DevicePath \"\"" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.449845 4787 generic.go:334] "Generic (PLEG): container finished" podID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerID="1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8" exitCode=0 Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.449920 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j49vw" event={"ID":"ddcb5699-4011-4d45-b90b-6ef1fc01106b","Type":"ContainerDied","Data":"1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8"} Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.449961 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j49vw" event={"ID":"ddcb5699-4011-4d45-b90b-6ef1fc01106b","Type":"ContainerDied","Data":"e969764544b1bf0cc0df86239ce65286202a361cd2850d3255b09729f8bad1dc"} Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.449985 4787 scope.go:117] "RemoveContainer" containerID="1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.449999 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j49vw" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.486309 4787 scope.go:117] "RemoveContainer" containerID="0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.510687 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j49vw"] Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.522185 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j49vw"] Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.529858 4787 scope.go:117] "RemoveContainer" containerID="64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.569726 4787 scope.go:117] "RemoveContainer" containerID="1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8" Jan 29 14:03:30 crc kubenswrapper[4787]: E0129 14:03:30.570595 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8\": container with ID starting with 1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8 not found: ID does not exist" containerID="1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.570651 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8"} err="failed to get container status \"1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8\": rpc error: code = NotFound desc = could not find container \"1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8\": container with ID starting with 1bfe362193cd9912f30359b7b9ec3fbc3c53e7e74528231b71987257d644fda8 not found: ID does not exist" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.570688 4787 scope.go:117] "RemoveContainer" containerID="0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764" Jan 29 14:03:30 crc kubenswrapper[4787]: E0129 14:03:30.571161 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764\": container with ID starting with 0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764 not found: ID does not exist" containerID="0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.571231 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764"} err="failed to get container status \"0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764\": rpc error: code = NotFound desc = could not find container \"0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764\": container with ID starting with 0bc069e4544d2a9e5d04dd1f3b2aa62b437f627d7abe76d78682a63ceed67764 not found: ID does not exist" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.571278 4787 scope.go:117] "RemoveContainer" containerID="64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c" Jan 29 14:03:30 crc kubenswrapper[4787]: E0129 14:03:30.571741 4787 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c\": container with ID starting with 64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c not found: ID does not exist" containerID="64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c" Jan 29 14:03:30 crc kubenswrapper[4787]: I0129 14:03:30.571930 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c"} err="failed to get container status \"64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c\": rpc error: code = NotFound desc = could not find container \"64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c\": container with ID starting with 64c754e3151bf275ffcc3368c8a70f77280953f041ca36bec8d1fe4c9c6ffe7c not found: ID does not exist" Jan 29 14:03:31 crc kubenswrapper[4787]: I0129 14:03:31.997101 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" path="/var/lib/kubelet/pods/ddcb5699-4011-4d45-b90b-6ef1fc01106b/volumes" Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.395215 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.395848 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.395902 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.396652 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.396718 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" gracePeriod=600 Jan 29 14:03:58 crc kubenswrapper[4787]: E0129 14:03:58.519200 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.695625 4787 generic.go:334] 
"Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" exitCode=0 Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.695692 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377"} Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.695731 4787 scope.go:117] "RemoveContainer" containerID="8f4edcfb36ff75237f162443e5562df0609202866e3190ec1b7a155d6dc6ae2f" Jan 29 14:03:58 crc kubenswrapper[4787]: I0129 14:03:58.696974 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:03:58 crc kubenswrapper[4787]: E0129 14:03:58.697522 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:04:12 crc kubenswrapper[4787]: I0129 14:04:12.986289 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:04:12 crc kubenswrapper[4787]: E0129 14:04:12.987133 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:04:27 crc kubenswrapper[4787]: I0129 14:04:27.987675 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:04:27 crc kubenswrapper[4787]: E0129 14:04:27.988918 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:04:38 crc kubenswrapper[4787]: I0129 14:04:38.985307 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:04:38 crc kubenswrapper[4787]: E0129 14:04:38.986300 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:04:52 crc kubenswrapper[4787]: I0129 14:04:52.987019 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" 
Jan 29 14:04:52 crc kubenswrapper[4787]: E0129 14:04:52.988101 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:05:04 crc kubenswrapper[4787]: I0129 14:05:04.986928 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:05:04 crc kubenswrapper[4787]: E0129 14:05:04.987650 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:05:15 crc kubenswrapper[4787]: I0129 14:05:15.985689 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:05:15 crc kubenswrapper[4787]: E0129 14:05:15.986650 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:05:29 crc kubenswrapper[4787]: I0129 14:05:29.985291 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:05:29 crc kubenswrapper[4787]: E0129 14:05:29.986035 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:05:41 crc kubenswrapper[4787]: I0129 14:05:41.989946 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:05:41 crc kubenswrapper[4787]: E0129 14:05:41.990886 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:05:52 crc kubenswrapper[4787]: I0129 14:05:52.986423 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:05:52 crc kubenswrapper[4787]: E0129 14:05:52.989568 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:06:04 crc kubenswrapper[4787]: I0129 14:06:04.985693 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:06:04 crc kubenswrapper[4787]: E0129 14:06:04.986806 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:06:16 crc kubenswrapper[4787]: I0129 14:06:16.985740 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:06:16 crc kubenswrapper[4787]: E0129 14:06:16.986762 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:06:30 crc kubenswrapper[4787]: I0129 14:06:30.985744 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:06:30 crc kubenswrapper[4787]: E0129 14:06:30.986689 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:06:43 crc kubenswrapper[4787]: I0129 14:06:43.985742 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:06:43 crc kubenswrapper[4787]: E0129 14:06:43.986443 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:06:57 crc kubenswrapper[4787]: I0129 14:06:57.986526 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:06:57 crc kubenswrapper[4787]: E0129 14:06:57.987567 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:07:11 crc kubenswrapper[4787]: I0129 14:07:11.990166 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:07:11 crc kubenswrapper[4787]: E0129 14:07:11.991009 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:07:22 crc kubenswrapper[4787]: I0129 14:07:22.986870 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:07:22 crc kubenswrapper[4787]: E0129 14:07:22.988213 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:07:34 crc kubenswrapper[4787]: I0129 14:07:34.987192 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:07:34 crc kubenswrapper[4787]: E0129 14:07:34.987966 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:07:47 crc kubenswrapper[4787]: I0129 14:07:47.986882 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:07:47 crc kubenswrapper[4787]: E0129 14:07:47.987699 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:08:01 crc kubenswrapper[4787]: I0129 14:08:01.994563 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:08:01 crc kubenswrapper[4787]: E0129 14:08:01.995786 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:08:14 crc kubenswrapper[4787]: I0129 14:08:14.985491 4787 
scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:08:14 crc kubenswrapper[4787]: E0129 14:08:14.986252 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:08:25 crc kubenswrapper[4787]: I0129 14:08:25.986760 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:08:25 crc kubenswrapper[4787]: E0129 14:08:25.987802 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:08:38 crc kubenswrapper[4787]: I0129 14:08:38.986196 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:08:38 crc kubenswrapper[4787]: E0129 14:08:38.987082 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:08:49 crc kubenswrapper[4787]: I0129 14:08:49.986125 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:08:49 crc kubenswrapper[4787]: E0129 14:08:49.987201 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:09:00 crc kubenswrapper[4787]: I0129 14:09:00.985807 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:09:01 crc kubenswrapper[4787]: I0129 14:09:01.423233 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"0f3a484dfaa6d61994bfc61d06cf97d51169ba0f46f41c05b2a5a7e6f5765f18"} Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.679404 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j8nnz"] Jan 29 14:10:42 crc kubenswrapper[4787]: E0129 14:10:42.680751 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="extract-utilities" Jan 29 14:10:42 crc kubenswrapper[4787]: 
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.679404 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j8nnz"]
Jan 29 14:10:42 crc kubenswrapper[4787]: E0129 14:10:42.680751 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="extract-utilities"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.680781 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="extract-utilities"
Jan 29 14:10:42 crc kubenswrapper[4787]: E0129 14:10:42.680815 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="extract-content"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.680905 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="extract-content"
Jan 29 14:10:42 crc kubenswrapper[4787]: E0129 14:10:42.680932 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="registry-server"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.680940 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="registry-server"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.681287 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddcb5699-4011-4d45-b90b-6ef1fc01106b" containerName="registry-server"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.682821 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.705345 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j8nnz"]
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.777536 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8h5p\" (UniqueName: \"kubernetes.io/projected/9a91b173-1efe-4fa4-a8ba-1b061581cca3-kube-api-access-l8h5p\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.777820 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-utilities\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.777936 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-catalog-content\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.879328 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-utilities\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.879444 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-catalog-content\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.879624 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8h5p\" (UniqueName: \"kubernetes.io/projected/9a91b173-1efe-4fa4-a8ba-1b061581cca3-kube-api-access-l8h5p\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.880385 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-utilities\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.880385 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-catalog-content\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:42 crc kubenswrapper[4787]: I0129 14:10:42.900433 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8h5p\" (UniqueName: \"kubernetes.io/projected/9a91b173-1efe-4fa4-a8ba-1b061581cca3-kube-api-access-l8h5p\") pod \"certified-operators-j8nnz\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") " pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:43 crc kubenswrapper[4787]: I0129 14:10:43.014958 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:43 crc kubenswrapper[4787]: I0129 14:10:43.557398 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j8nnz"]
Jan 29 14:10:43 crc kubenswrapper[4787]: W0129 14:10:43.565578 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a91b173_1efe_4fa4_a8ba_1b061581cca3.slice/crio-fe75bf68da8bf326964bbb10a54860672b691db8650bb68bceabbd72b57cd7c5 WatchSource:0}: Error finding container fe75bf68da8bf326964bbb10a54860672b691db8650bb68bceabbd72b57cd7c5: Status 404 returned error can't find the container with id fe75bf68da8bf326964bbb10a54860672b691db8650bb68bceabbd72b57cd7c5
Jan 29 14:10:44 crc kubenswrapper[4787]: I0129 14:10:44.360346 4787 generic.go:334] "Generic (PLEG): container finished" podID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerID="574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48" exitCode=0
Jan 29 14:10:44 crc kubenswrapper[4787]: I0129 14:10:44.360410 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8nnz" event={"ID":"9a91b173-1efe-4fa4-a8ba-1b061581cca3","Type":"ContainerDied","Data":"574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48"}
Jan 29 14:10:44 crc kubenswrapper[4787]: I0129 14:10:44.360722 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8nnz" event={"ID":"9a91b173-1efe-4fa4-a8ba-1b061581cca3","Type":"ContainerStarted","Data":"fe75bf68da8bf326964bbb10a54860672b691db8650bb68bceabbd72b57cd7c5"}
Jan 29 14:10:44 crc kubenswrapper[4787]: I0129 14:10:44.363129 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 14:10:45 crc kubenswrapper[4787]: I0129 14:10:45.370376 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8nnz" event={"ID":"9a91b173-1efe-4fa4-a8ba-1b061581cca3","Type":"ContainerStarted","Data":"fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1"}
Jan 29 14:10:46 crc kubenswrapper[4787]: I0129 14:10:46.384985 4787 generic.go:334] "Generic (PLEG): container finished" podID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerID="fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1" exitCode=0
Jan 29 14:10:46 crc kubenswrapper[4787]: I0129 14:10:46.385107 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8nnz" event={"ID":"9a91b173-1efe-4fa4-a8ba-1b061581cca3","Type":"ContainerDied","Data":"fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1"}
Jan 29 14:10:47 crc kubenswrapper[4787]: I0129 14:10:47.395106 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8nnz" event={"ID":"9a91b173-1efe-4fa4-a8ba-1b061581cca3","Type":"ContainerStarted","Data":"67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255"}
Jan 29 14:10:47 crc kubenswrapper[4787]: I0129 14:10:47.431700 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j8nnz" podStartSLOduration=2.988622545 podStartE2EDuration="5.431667667s" podCreationTimestamp="2026-01-29 14:10:42 +0000 UTC" firstStartedPulling="2026-01-29 14:10:44.362808208 +0000 UTC m=+3283.124068494" lastFinishedPulling="2026-01-29 14:10:46.80585333 +0000 UTC m=+3285.567113616" observedRunningTime="2026-01-29 14:10:47.413179777 +0000 UTC m=+3286.174440063" watchObservedRunningTime="2026-01-29 14:10:47.431667667 +0000 UTC m=+3286.192927953"
Jan 29 14:10:53 crc kubenswrapper[4787]: I0129 14:10:53.015334 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:53 crc kubenswrapper[4787]: I0129 14:10:53.015774 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:53 crc kubenswrapper[4787]: I0129 14:10:53.096923 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:53 crc kubenswrapper[4787]: I0129 14:10:53.517053 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:53 crc kubenswrapper[4787]: I0129 14:10:53.591350 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j8nnz"]
Jan 29 14:10:55 crc kubenswrapper[4787]: I0129 14:10:55.464373 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j8nnz" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerName="registry-server" containerID="cri-o://67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255" gracePeriod=2
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.032331 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.199336 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8h5p\" (UniqueName: \"kubernetes.io/projected/9a91b173-1efe-4fa4-a8ba-1b061581cca3-kube-api-access-l8h5p\") pod \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") "
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.199447 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-catalog-content\") pod \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") "
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.200765 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-utilities\") pod \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\" (UID: \"9a91b173-1efe-4fa4-a8ba-1b061581cca3\") "
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.201692 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-utilities" (OuterVolumeSpecName: "utilities") pod "9a91b173-1efe-4fa4-a8ba-1b061581cca3" (UID: "9a91b173-1efe-4fa4-a8ba-1b061581cca3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.205861 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a91b173-1efe-4fa4-a8ba-1b061581cca3-kube-api-access-l8h5p" (OuterVolumeSpecName: "kube-api-access-l8h5p") pod "9a91b173-1efe-4fa4-a8ba-1b061581cca3" (UID: "9a91b173-1efe-4fa4-a8ba-1b061581cca3"). InnerVolumeSpecName "kube-api-access-l8h5p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.303040 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8h5p\" (UniqueName: \"kubernetes.io/projected/9a91b173-1efe-4fa4-a8ba-1b061581cca3-kube-api-access-l8h5p\") on node \"crc\" DevicePath \"\""
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.303076 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.477095 4787 generic.go:334] "Generic (PLEG): container finished" podID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerID="67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255" exitCode=0
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.477141 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8nnz" event={"ID":"9a91b173-1efe-4fa4-a8ba-1b061581cca3","Type":"ContainerDied","Data":"67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255"}
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.477178 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j8nnz" event={"ID":"9a91b173-1efe-4fa4-a8ba-1b061581cca3","Type":"ContainerDied","Data":"fe75bf68da8bf326964bbb10a54860672b691db8650bb68bceabbd72b57cd7c5"}
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.477200 4787 scope.go:117] "RemoveContainer" containerID="67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.477232 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j8nnz"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.505616 4787 scope.go:117] "RemoveContainer" containerID="fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.529075 4787 scope.go:117] "RemoveContainer" containerID="574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.577305 4787 scope.go:117] "RemoveContainer" containerID="67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255"
Jan 29 14:10:56 crc kubenswrapper[4787]: E0129 14:10:56.577953 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255\": container with ID starting with 67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255 not found: ID does not exist" containerID="67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.578069 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255"} err="failed to get container status \"67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255\": rpc error: code = NotFound desc = could not find container \"67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255\": container with ID starting with 67e2d55c5a71f517a7be8320c9cfd3723f2618481ab8d8586e7f979bcf911255 not found: ID does not exist"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.578212 4787 scope.go:117] "RemoveContainer" containerID="fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1"
Jan 29 14:10:56 crc kubenswrapper[4787]: E0129 14:10:56.578852 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1\": container with ID starting with fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1 not found: ID does not exist" containerID="fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.578894 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1"} err="failed to get container status \"fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1\": rpc error: code = NotFound desc = could not find container \"fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1\": container with ID starting with fefa76b74e35cd72b44c38e329ad6e4614f7fa3ef54bbdf680f0dc10f62717c1 not found: ID does not exist"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.578920 4787 scope.go:117] "RemoveContainer" containerID="574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48"
Jan 29 14:10:56 crc kubenswrapper[4787]: E0129 14:10:56.579219 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48\": container with ID starting with 574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48 not found: ID does not exist" containerID="574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.579323 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48"} err="failed to get container status \"574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48\": rpc error: code = NotFound desc = could not find container \"574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48\": container with ID starting with 574c01574acfd56746835f455a3b9d22ab93f69af7151f9e4c4ddf8ff022bb48 not found: ID does not exist"
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.859858 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a91b173-1efe-4fa4-a8ba-1b061581cca3" (UID: "9a91b173-1efe-4fa4-a8ba-1b061581cca3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:10:56 crc kubenswrapper[4787]: I0129 14:10:56.916314 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a91b173-1efe-4fa4-a8ba-1b061581cca3-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 14:10:57 crc kubenswrapper[4787]: I0129 14:10:57.139650 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j8nnz"]
Jan 29 14:10:57 crc kubenswrapper[4787]: I0129 14:10:57.151380 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j8nnz"]
Jan 29 14:10:58 crc kubenswrapper[4787]: I0129 14:10:58.002098 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" path="/var/lib/kubelet/pods/9a91b173-1efe-4fa4-a8ba-1b061581cca3/volumes"
Jan 29 14:11:28 crc kubenswrapper[4787]: I0129 14:11:28.394643 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 14:11:28 crc kubenswrapper[4787]: I0129 14:11:28.395354 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.186018 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g5z4t"]
Jan 29 14:11:39 crc kubenswrapper[4787]: E0129 14:11:39.187219 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerName="registry-server"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.187245 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerName="registry-server"
Jan 29 14:11:39 crc kubenswrapper[4787]: E0129 14:11:39.187289 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerName="extract-utilities"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.187302 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerName="extract-utilities"
Jan 29 14:11:39 crc kubenswrapper[4787]: E0129 14:11:39.187341 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerName="extract-content"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.187355 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerName="extract-content"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.187672 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a91b173-1efe-4fa4-a8ba-1b061581cca3" containerName="registry-server"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.189666 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.201035 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g5z4t"]
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.331484 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcmtt\" (UniqueName: \"kubernetes.io/projected/b810fb71-4aea-413e-8554-6b45d37ec850-kube-api-access-zcmtt\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.331585 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-catalog-content\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.331813 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-utilities\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.433056 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-catalog-content\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.433157 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-utilities\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.433230 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcmtt\" (UniqueName: \"kubernetes.io/projected/b810fb71-4aea-413e-8554-6b45d37ec850-kube-api-access-zcmtt\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.433771 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-catalog-content\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.433833 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-utilities\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.463060 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcmtt\" (UniqueName: \"kubernetes.io/projected/b810fb71-4aea-413e-8554-6b45d37ec850-kube-api-access-zcmtt\") pod \"community-operators-g5z4t\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:39 crc kubenswrapper[4787]: I0129 14:11:39.518656 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g5z4t"
Jan 29 14:11:40 crc kubenswrapper[4787]: I0129 14:11:40.054049 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g5z4t"]
Jan 29 14:11:40 crc kubenswrapper[4787]: I0129 14:11:40.897346 4787 generic.go:334] "Generic (PLEG): container finished" podID="b810fb71-4aea-413e-8554-6b45d37ec850" containerID="d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa" exitCode=0
Jan 29 14:11:40 crc kubenswrapper[4787]: I0129 14:11:40.897557 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5z4t" event={"ID":"b810fb71-4aea-413e-8554-6b45d37ec850","Type":"ContainerDied","Data":"d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa"}
Jan 29 14:11:40 crc kubenswrapper[4787]: I0129 14:11:40.897763 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5z4t" event={"ID":"b810fb71-4aea-413e-8554-6b45d37ec850","Type":"ContainerStarted","Data":"0f852ca1478fdb30fcc2d5383858c900da3d8e4ef6a3ef851294f8702183e0a7"}
Jan 29 14:11:42 crc kubenswrapper[4787]: I0129 14:11:42.920762 4787 generic.go:334] "Generic (PLEG): container finished" podID="b810fb71-4aea-413e-8554-6b45d37ec850" containerID="f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166" exitCode=0
Jan 29 14:11:42 crc kubenswrapper[4787]: I0129 14:11:42.920896 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5z4t" event={"ID":"b810fb71-4aea-413e-8554-6b45d37ec850","Type":"ContainerDied","Data":"f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166"}
Jan 29 14:11:43 crc kubenswrapper[4787]: I0129 14:11:43.932273 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5z4t" event={"ID":"b810fb71-4aea-413e-8554-6b45d37ec850","Type":"ContainerStarted","Data":"607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed"}
Jan 29 14:11:43 crc kubenswrapper[4787]: I0129 14:11:43.955159 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-g5z4t" podStartSLOduration=2.485113509 podStartE2EDuration="4.955138281s"
podCreationTimestamp="2026-01-29 14:11:39 +0000 UTC" firstStartedPulling="2026-01-29 14:11:40.899291997 +0000 UTC m=+3339.660552273" lastFinishedPulling="2026-01-29 14:11:43.369316759 +0000 UTC m=+3342.130577045" observedRunningTime="2026-01-29 14:11:43.953207066 +0000 UTC m=+3342.714467342" watchObservedRunningTime="2026-01-29 14:11:43.955138281 +0000 UTC m=+3342.716398577" Jan 29 14:11:49 crc kubenswrapper[4787]: I0129 14:11:49.518967 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g5z4t" Jan 29 14:11:49 crc kubenswrapper[4787]: I0129 14:11:49.519423 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g5z4t" Jan 29 14:11:49 crc kubenswrapper[4787]: I0129 14:11:49.615571 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g5z4t" Jan 29 14:11:50 crc kubenswrapper[4787]: I0129 14:11:50.037908 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g5z4t" Jan 29 14:11:50 crc kubenswrapper[4787]: I0129 14:11:50.096117 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g5z4t"] Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.000293 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g5z4t" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" containerName="registry-server" containerID="cri-o://607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed" gracePeriod=2 Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.417793 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g5z4t" Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.533177 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-utilities\") pod \"b810fb71-4aea-413e-8554-6b45d37ec850\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.533242 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-catalog-content\") pod \"b810fb71-4aea-413e-8554-6b45d37ec850\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.533334 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcmtt\" (UniqueName: \"kubernetes.io/projected/b810fb71-4aea-413e-8554-6b45d37ec850-kube-api-access-zcmtt\") pod \"b810fb71-4aea-413e-8554-6b45d37ec850\" (UID: \"b810fb71-4aea-413e-8554-6b45d37ec850\") " Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.534830 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-utilities" (OuterVolumeSpecName: "utilities") pod "b810fb71-4aea-413e-8554-6b45d37ec850" (UID: "b810fb71-4aea-413e-8554-6b45d37ec850"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.541902 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b810fb71-4aea-413e-8554-6b45d37ec850-kube-api-access-zcmtt" (OuterVolumeSpecName: "kube-api-access-zcmtt") pod "b810fb71-4aea-413e-8554-6b45d37ec850" (UID: "b810fb71-4aea-413e-8554-6b45d37ec850"). InnerVolumeSpecName "kube-api-access-zcmtt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.635107 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.635962 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcmtt\" (UniqueName: \"kubernetes.io/projected/b810fb71-4aea-413e-8554-6b45d37ec850-kube-api-access-zcmtt\") on node \"crc\" DevicePath \"\"" Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.771753 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b810fb71-4aea-413e-8554-6b45d37ec850" (UID: "b810fb71-4aea-413e-8554-6b45d37ec850"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:11:52 crc kubenswrapper[4787]: I0129 14:11:52.839887 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b810fb71-4aea-413e-8554-6b45d37ec850-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.017975 4787 generic.go:334] "Generic (PLEG): container finished" podID="b810fb71-4aea-413e-8554-6b45d37ec850" containerID="607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed" exitCode=0 Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.018043 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g5z4t" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.018039 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5z4t" event={"ID":"b810fb71-4aea-413e-8554-6b45d37ec850","Type":"ContainerDied","Data":"607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed"} Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.018176 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5z4t" event={"ID":"b810fb71-4aea-413e-8554-6b45d37ec850","Type":"ContainerDied","Data":"0f852ca1478fdb30fcc2d5383858c900da3d8e4ef6a3ef851294f8702183e0a7"} Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.018204 4787 scope.go:117] "RemoveContainer" containerID="607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.058853 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g5z4t"] Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.059255 4787 scope.go:117] "RemoveContainer" containerID="f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.064614 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g5z4t"] Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.110344 4787 scope.go:117] "RemoveContainer" containerID="d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.142396 4787 scope.go:117] "RemoveContainer" containerID="607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed" Jan 29 14:11:53 crc kubenswrapper[4787]: E0129 14:11:53.142799 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed\": container with ID starting with 607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed not found: ID does not exist" containerID="607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.142850 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed"} err="failed to get container status \"607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed\": rpc error: code = NotFound desc = could not find container \"607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed\": container with ID starting with 607993e8856616ca9a876d8f56c0996fb6d6712e92f3dc14e38b008e164ca4ed not found: ID does not exist" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.142882 4787 scope.go:117] "RemoveContainer" containerID="f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166" Jan 29 14:11:53 crc kubenswrapper[4787]: E0129 14:11:53.143434 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166\": container with ID starting with f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166 not found: ID does not exist" containerID="f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.143517 4787 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166"} err="failed to get container status \"f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166\": rpc error: code = NotFound desc = could not find container \"f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166\": container with ID starting with f611fa19c9c97086bfc8a611b099c193df19aa1b6e9c8a8781fc1cf0b0ed6166 not found: ID does not exist" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.143532 4787 scope.go:117] "RemoveContainer" containerID="d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa" Jan 29 14:11:53 crc kubenswrapper[4787]: E0129 14:11:53.143853 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa\": container with ID starting with d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa not found: ID does not exist" containerID="d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.143896 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa"} err="failed to get container status \"d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa\": rpc error: code = NotFound desc = could not find container \"d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa\": container with ID starting with d843dd734c1fe0423c3d75b5d33484917c2ff158d3f052c49a199f093354ddaa not found: ID does not exist" Jan 29 14:11:53 crc kubenswrapper[4787]: I0129 14:11:53.997716 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" path="/var/lib/kubelet/pods/b810fb71-4aea-413e-8554-6b45d37ec850/volumes" Jan 29 14:11:58 crc kubenswrapper[4787]: I0129 14:11:58.395533 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:11:58 crc kubenswrapper[4787]: I0129 14:11:58.396069 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:12:28 crc kubenswrapper[4787]: I0129 14:12:28.394397 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:12:28 crc kubenswrapper[4787]: I0129 14:12:28.395188 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:12:28 crc kubenswrapper[4787]: I0129 
14:12:28.395270 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 14:12:28 crc kubenswrapper[4787]: I0129 14:12:28.396366 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0f3a484dfaa6d61994bfc61d06cf97d51169ba0f46f41c05b2a5a7e6f5765f18"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 14:12:28 crc kubenswrapper[4787]: I0129 14:12:28.396611 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://0f3a484dfaa6d61994bfc61d06cf97d51169ba0f46f41c05b2a5a7e6f5765f18" gracePeriod=600 Jan 29 14:12:29 crc kubenswrapper[4787]: I0129 14:12:29.346740 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="0f3a484dfaa6d61994bfc61d06cf97d51169ba0f46f41c05b2a5a7e6f5765f18" exitCode=0 Jan 29 14:12:29 crc kubenswrapper[4787]: I0129 14:12:29.346806 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"0f3a484dfaa6d61994bfc61d06cf97d51169ba0f46f41c05b2a5a7e6f5765f18"} Jan 29 14:12:29 crc kubenswrapper[4787]: I0129 14:12:29.347344 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"} Jan 29 14:12:29 crc kubenswrapper[4787]: I0129 14:12:29.347370 4787 scope.go:117] "RemoveContainer" containerID="18ba4a970da8a40dc494e25b7d2a1d49d79809bfe3ff81d6777318c4009c3377" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.322014 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fxp9q"] Jan 29 14:13:03 crc kubenswrapper[4787]: E0129 14:13:03.323101 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" containerName="extract-utilities" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.323124 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" containerName="extract-utilities" Jan 29 14:13:03 crc kubenswrapper[4787]: E0129 14:13:03.323156 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" containerName="registry-server" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.323166 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" containerName="registry-server" Jan 29 14:13:03 crc kubenswrapper[4787]: E0129 14:13:03.323191 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" containerName="extract-content" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.323200 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" containerName="extract-content" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.323376 4787 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="b810fb71-4aea-413e-8554-6b45d37ec850" containerName="registry-server" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.332404 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.360628 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fxp9q"] Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.399852 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-utilities\") pod \"redhat-operators-fxp9q\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.399980 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-catalog-content\") pod \"redhat-operators-fxp9q\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.400016 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdrln\" (UniqueName: \"kubernetes.io/projected/9195ac13-b302-4b97-984f-616994984ea7-kube-api-access-kdrln\") pod \"redhat-operators-fxp9q\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.502617 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-catalog-content\") pod \"redhat-operators-fxp9q\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.502666 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdrln\" (UniqueName: \"kubernetes.io/projected/9195ac13-b302-4b97-984f-616994984ea7-kube-api-access-kdrln\") pod \"redhat-operators-fxp9q\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.502694 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-utilities\") pod \"redhat-operators-fxp9q\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.503165 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-utilities\") pod \"redhat-operators-fxp9q\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.503506 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-catalog-content\") pod \"redhat-operators-fxp9q\" (UID: 
\"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.537870 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdrln\" (UniqueName: \"kubernetes.io/projected/9195ac13-b302-4b97-984f-616994984ea7-kube-api-access-kdrln\") pod \"redhat-operators-fxp9q\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:03 crc kubenswrapper[4787]: I0129 14:13:03.700525 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:04 crc kubenswrapper[4787]: I0129 14:13:04.136919 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fxp9q"] Jan 29 14:13:04 crc kubenswrapper[4787]: I0129 14:13:04.743019 4787 generic.go:334] "Generic (PLEG): container finished" podID="9195ac13-b302-4b97-984f-616994984ea7" containerID="ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0" exitCode=0 Jan 29 14:13:04 crc kubenswrapper[4787]: I0129 14:13:04.743834 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxp9q" event={"ID":"9195ac13-b302-4b97-984f-616994984ea7","Type":"ContainerDied","Data":"ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0"} Jan 29 14:13:04 crc kubenswrapper[4787]: I0129 14:13:04.743860 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxp9q" event={"ID":"9195ac13-b302-4b97-984f-616994984ea7","Type":"ContainerStarted","Data":"d0ae5b8e0e3a957e35c927166055c41cfa5448a3fd443cf9f38f1e87f888100d"} Jan 29 14:13:06 crc kubenswrapper[4787]: I0129 14:13:06.764547 4787 generic.go:334] "Generic (PLEG): container finished" podID="9195ac13-b302-4b97-984f-616994984ea7" containerID="e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a" exitCode=0 Jan 29 14:13:06 crc kubenswrapper[4787]: I0129 14:13:06.764645 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxp9q" event={"ID":"9195ac13-b302-4b97-984f-616994984ea7","Type":"ContainerDied","Data":"e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a"} Jan 29 14:13:07 crc kubenswrapper[4787]: I0129 14:13:07.777802 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxp9q" event={"ID":"9195ac13-b302-4b97-984f-616994984ea7","Type":"ContainerStarted","Data":"c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611"} Jan 29 14:13:07 crc kubenswrapper[4787]: I0129 14:13:07.818119 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fxp9q" podStartSLOduration=2.15802308 podStartE2EDuration="4.818093127s" podCreationTimestamp="2026-01-29 14:13:03 +0000 UTC" firstStartedPulling="2026-01-29 14:13:04.744338024 +0000 UTC m=+3423.505598290" lastFinishedPulling="2026-01-29 14:13:07.404408041 +0000 UTC m=+3426.165668337" observedRunningTime="2026-01-29 14:13:07.807744645 +0000 UTC m=+3426.569004961" watchObservedRunningTime="2026-01-29 14:13:07.818093127 +0000 UTC m=+3426.579353443" Jan 29 14:13:13 crc kubenswrapper[4787]: I0129 14:13:13.700661 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:13 crc kubenswrapper[4787]: I0129 14:13:13.701026 4787 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:14 crc kubenswrapper[4787]: I0129 14:13:14.771726 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fxp9q" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="registry-server" probeResult="failure" output=< Jan 29 14:13:14 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s Jan 29 14:13:14 crc kubenswrapper[4787]: > Jan 29 14:13:23 crc kubenswrapper[4787]: I0129 14:13:23.762624 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:23 crc kubenswrapper[4787]: I0129 14:13:23.819194 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:24 crc kubenswrapper[4787]: I0129 14:13:24.003733 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fxp9q"] Jan 29 14:13:24 crc kubenswrapper[4787]: I0129 14:13:24.908880 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fxp9q" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="registry-server" containerID="cri-o://c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611" gracePeriod=2 Jan 29 14:13:25 crc kubenswrapper[4787]: I0129 14:13:25.906872 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:25 crc kubenswrapper[4787]: I0129 14:13:25.923419 4787 generic.go:334] "Generic (PLEG): container finished" podID="9195ac13-b302-4b97-984f-616994984ea7" containerID="c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611" exitCode=0 Jan 29 14:13:25 crc kubenswrapper[4787]: I0129 14:13:25.923539 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fxp9q" Jan 29 14:13:25 crc kubenswrapper[4787]: I0129 14:13:25.923581 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxp9q" event={"ID":"9195ac13-b302-4b97-984f-616994984ea7","Type":"ContainerDied","Data":"c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611"} Jan 29 14:13:25 crc kubenswrapper[4787]: I0129 14:13:25.923631 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fxp9q" event={"ID":"9195ac13-b302-4b97-984f-616994984ea7","Type":"ContainerDied","Data":"d0ae5b8e0e3a957e35c927166055c41cfa5448a3fd443cf9f38f1e87f888100d"} Jan 29 14:13:25 crc kubenswrapper[4787]: I0129 14:13:25.923661 4787 scope.go:117] "RemoveContainer" containerID="c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611" Jan 29 14:13:25 crc kubenswrapper[4787]: I0129 14:13:25.971062 4787 scope.go:117] "RemoveContainer" containerID="e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a" Jan 29 14:13:25 crc kubenswrapper[4787]: I0129 14:13:25.996257 4787 scope.go:117] "RemoveContainer" containerID="ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.024824 4787 scope.go:117] "RemoveContainer" containerID="c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611" Jan 29 14:13:26 crc kubenswrapper[4787]: E0129 14:13:26.025301 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611\": container with ID starting with c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611 not found: ID does not exist" containerID="c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.025330 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611"} err="failed to get container status \"c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611\": rpc error: code = NotFound desc = could not find container \"c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611\": container with ID starting with c471d093dec6e26f70b6a89de012624dc7485e0b858c6388fa134e0a4e958611 not found: ID does not exist" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.025352 4787 scope.go:117] "RemoveContainer" containerID="e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a" Jan 29 14:13:26 crc kubenswrapper[4787]: E0129 14:13:26.025828 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a\": container with ID starting with e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a not found: ID does not exist" containerID="e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.025903 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a"} err="failed to get container status \"e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a\": rpc error: code = NotFound desc = could not find container 
\"e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a\": container with ID starting with e5a7868fe0eb82fd39d7d4e344253359a7466df686e235f9ee9d1ef7e46f855a not found: ID does not exist" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.025955 4787 scope.go:117] "RemoveContainer" containerID="ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0" Jan 29 14:13:26 crc kubenswrapper[4787]: E0129 14:13:26.026342 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0\": container with ID starting with ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0 not found: ID does not exist" containerID="ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.026393 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0"} err="failed to get container status \"ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0\": rpc error: code = NotFound desc = could not find container \"ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0\": container with ID starting with ef3087e8e0b252d7b85d2a14a23d9a5a9019def8641f58e8d353b5f4efb968f0 not found: ID does not exist" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.042001 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-catalog-content\") pod \"9195ac13-b302-4b97-984f-616994984ea7\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.042159 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-utilities\") pod \"9195ac13-b302-4b97-984f-616994984ea7\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.042246 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdrln\" (UniqueName: \"kubernetes.io/projected/9195ac13-b302-4b97-984f-616994984ea7-kube-api-access-kdrln\") pod \"9195ac13-b302-4b97-984f-616994984ea7\" (UID: \"9195ac13-b302-4b97-984f-616994984ea7\") " Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.043525 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-utilities" (OuterVolumeSpecName: "utilities") pod "9195ac13-b302-4b97-984f-616994984ea7" (UID: "9195ac13-b302-4b97-984f-616994984ea7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.052387 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9195ac13-b302-4b97-984f-616994984ea7-kube-api-access-kdrln" (OuterVolumeSpecName: "kube-api-access-kdrln") pod "9195ac13-b302-4b97-984f-616994984ea7" (UID: "9195ac13-b302-4b97-984f-616994984ea7"). InnerVolumeSpecName "kube-api-access-kdrln". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.145752 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdrln\" (UniqueName: \"kubernetes.io/projected/9195ac13-b302-4b97-984f-616994984ea7-kube-api-access-kdrln\") on node \"crc\" DevicePath \"\"" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.145789 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.163867 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9195ac13-b302-4b97-984f-616994984ea7" (UID: "9195ac13-b302-4b97-984f-616994984ea7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.247821 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9195ac13-b302-4b97-984f-616994984ea7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.260594 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fxp9q"] Jan 29 14:13:26 crc kubenswrapper[4787]: I0129 14:13:26.266833 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fxp9q"] Jan 29 14:13:27 crc kubenswrapper[4787]: I0129 14:13:27.996929 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9195ac13-b302-4b97-984f-616994984ea7" path="/var/lib/kubelet/pods/9195ac13-b302-4b97-984f-616994984ea7/volumes" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.118117 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bcv7g"] Jan 29 14:13:37 crc kubenswrapper[4787]: E0129 14:13:37.119022 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="extract-utilities" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.119037 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="extract-utilities" Jan 29 14:13:37 crc kubenswrapper[4787]: E0129 14:13:37.119056 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="registry-server" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.119064 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="registry-server" Jan 29 14:13:37 crc kubenswrapper[4787]: E0129 14:13:37.119095 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="extract-content" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.119105 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="extract-content" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.119296 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="9195ac13-b302-4b97-984f-616994984ea7" containerName="registry-server" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.120822 4787 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.141049 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bcv7g"] Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.218360 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjkz9\" (UniqueName: \"kubernetes.io/projected/8712d082-0189-42d4-ba1e-5ce82fa630fb-kube-api-access-cjkz9\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.218529 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-utilities\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.218712 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-catalog-content\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.320486 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjkz9\" (UniqueName: \"kubernetes.io/projected/8712d082-0189-42d4-ba1e-5ce82fa630fb-kube-api-access-cjkz9\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.320552 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-utilities\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.320621 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-catalog-content\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.321124 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-catalog-content\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.321164 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-utilities\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.350369 4787 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjkz9\" (UniqueName: \"kubernetes.io/projected/8712d082-0189-42d4-ba1e-5ce82fa630fb-kube-api-access-cjkz9\") pod \"redhat-marketplace-bcv7g\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.439851 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:37 crc kubenswrapper[4787]: I0129 14:13:37.720864 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bcv7g"] Jan 29 14:13:38 crc kubenswrapper[4787]: I0129 14:13:38.082355 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bcv7g" event={"ID":"8712d082-0189-42d4-ba1e-5ce82fa630fb","Type":"ContainerStarted","Data":"a793e998953e8a896cdec6e3d7ee0618c5f8f78e3b791a27bac2fc61296097fe"} Jan 29 14:13:38 crc kubenswrapper[4787]: I0129 14:13:38.082395 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bcv7g" event={"ID":"8712d082-0189-42d4-ba1e-5ce82fa630fb","Type":"ContainerStarted","Data":"5ea8628a7c0056e0b59fa0addfe7e6e161b67a561de39457c557ed5d46ebf7cd"} Jan 29 14:13:39 crc kubenswrapper[4787]: I0129 14:13:39.092447 4787 generic.go:334] "Generic (PLEG): container finished" podID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerID="a793e998953e8a896cdec6e3d7ee0618c5f8f78e3b791a27bac2fc61296097fe" exitCode=0 Jan 29 14:13:39 crc kubenswrapper[4787]: I0129 14:13:39.092521 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bcv7g" event={"ID":"8712d082-0189-42d4-ba1e-5ce82fa630fb","Type":"ContainerDied","Data":"a793e998953e8a896cdec6e3d7ee0618c5f8f78e3b791a27bac2fc61296097fe"} Jan 29 14:13:40 crc kubenswrapper[4787]: I0129 14:13:40.103407 4787 generic.go:334] "Generic (PLEG): container finished" podID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerID="6c6bb2f87daf2d40bfd7d7902397d191eea96e6061d850ea614bb75cb3ae6bae" exitCode=0 Jan 29 14:13:40 crc kubenswrapper[4787]: I0129 14:13:40.103481 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bcv7g" event={"ID":"8712d082-0189-42d4-ba1e-5ce82fa630fb","Type":"ContainerDied","Data":"6c6bb2f87daf2d40bfd7d7902397d191eea96e6061d850ea614bb75cb3ae6bae"} Jan 29 14:13:41 crc kubenswrapper[4787]: I0129 14:13:41.111698 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bcv7g" event={"ID":"8712d082-0189-42d4-ba1e-5ce82fa630fb","Type":"ContainerStarted","Data":"98aa3fed483cf78c744c794bf7f97025f0ccf1cbb35257ed4d86a986678e2188"} Jan 29 14:13:41 crc kubenswrapper[4787]: I0129 14:13:41.128105 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bcv7g" podStartSLOduration=1.5033180800000001 podStartE2EDuration="4.128076388s" podCreationTimestamp="2026-01-29 14:13:37 +0000 UTC" firstStartedPulling="2026-01-29 14:13:38.084440547 +0000 UTC m=+3456.845700863" lastFinishedPulling="2026-01-29 14:13:40.709198875 +0000 UTC m=+3459.470459171" observedRunningTime="2026-01-29 14:13:41.126282518 +0000 UTC m=+3459.887542804" watchObservedRunningTime="2026-01-29 14:13:41.128076388 +0000 UTC m=+3459.889336674" Jan 29 14:13:47 crc kubenswrapper[4787]: I0129 14:13:47.440886 4787 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:47 crc kubenswrapper[4787]: I0129 14:13:47.441617 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:47 crc kubenswrapper[4787]: I0129 14:13:47.518927 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:48 crc kubenswrapper[4787]: I0129 14:13:48.240727 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:48 crc kubenswrapper[4787]: I0129 14:13:48.283329 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bcv7g"] Jan 29 14:13:50 crc kubenswrapper[4787]: I0129 14:13:50.187972 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bcv7g" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerName="registry-server" containerID="cri-o://98aa3fed483cf78c744c794bf7f97025f0ccf1cbb35257ed4d86a986678e2188" gracePeriod=2 Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.231257 4787 generic.go:334] "Generic (PLEG): container finished" podID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerID="98aa3fed483cf78c744c794bf7f97025f0ccf1cbb35257ed4d86a986678e2188" exitCode=0 Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.231669 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bcv7g" event={"ID":"8712d082-0189-42d4-ba1e-5ce82fa630fb","Type":"ContainerDied","Data":"98aa3fed483cf78c744c794bf7f97025f0ccf1cbb35257ed4d86a986678e2188"} Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.402522 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bcv7g" Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.432733 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-catalog-content\") pod \"8712d082-0189-42d4-ba1e-5ce82fa630fb\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.432861 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjkz9\" (UniqueName: \"kubernetes.io/projected/8712d082-0189-42d4-ba1e-5ce82fa630fb-kube-api-access-cjkz9\") pod \"8712d082-0189-42d4-ba1e-5ce82fa630fb\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.432902 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-utilities\") pod \"8712d082-0189-42d4-ba1e-5ce82fa630fb\" (UID: \"8712d082-0189-42d4-ba1e-5ce82fa630fb\") " Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.433809 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-utilities" (OuterVolumeSpecName: "utilities") pod "8712d082-0189-42d4-ba1e-5ce82fa630fb" (UID: "8712d082-0189-42d4-ba1e-5ce82fa630fb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.434189 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.442016 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8712d082-0189-42d4-ba1e-5ce82fa630fb-kube-api-access-cjkz9" (OuterVolumeSpecName: "kube-api-access-cjkz9") pod "8712d082-0189-42d4-ba1e-5ce82fa630fb" (UID: "8712d082-0189-42d4-ba1e-5ce82fa630fb"). InnerVolumeSpecName "kube-api-access-cjkz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.456386 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8712d082-0189-42d4-ba1e-5ce82fa630fb" (UID: "8712d082-0189-42d4-ba1e-5ce82fa630fb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.535053 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8712d082-0189-42d4-ba1e-5ce82fa630fb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:13:51 crc kubenswrapper[4787]: I0129 14:13:51.535086 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjkz9\" (UniqueName: \"kubernetes.io/projected/8712d082-0189-42d4-ba1e-5ce82fa630fb-kube-api-access-cjkz9\") on node \"crc\" DevicePath \"\"" Jan 29 14:13:52 crc kubenswrapper[4787]: I0129 14:13:52.244935 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bcv7g" event={"ID":"8712d082-0189-42d4-ba1e-5ce82fa630fb","Type":"ContainerDied","Data":"5ea8628a7c0056e0b59fa0addfe7e6e161b67a561de39457c557ed5d46ebf7cd"} Jan 29 14:13:52 crc kubenswrapper[4787]: I0129 14:13:52.245015 4787 scope.go:117] "RemoveContainer" containerID="98aa3fed483cf78c744c794bf7f97025f0ccf1cbb35257ed4d86a986678e2188" Jan 29 14:13:52 crc kubenswrapper[4787]: I0129 14:13:52.245048 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bcv7g"
Jan 29 14:13:52 crc kubenswrapper[4787]: I0129 14:13:52.278414 4787 scope.go:117] "RemoveContainer" containerID="6c6bb2f87daf2d40bfd7d7902397d191eea96e6061d850ea614bb75cb3ae6bae"
Jan 29 14:13:52 crc kubenswrapper[4787]: I0129 14:13:52.278438 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bcv7g"]
Jan 29 14:13:52 crc kubenswrapper[4787]: I0129 14:13:52.285915 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bcv7g"]
Jan 29 14:13:52 crc kubenswrapper[4787]: I0129 14:13:52.298685 4787 scope.go:117] "RemoveContainer" containerID="a793e998953e8a896cdec6e3d7ee0618c5f8f78e3b791a27bac2fc61296097fe"
Jan 29 14:13:53 crc kubenswrapper[4787]: I0129 14:13:53.995193 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" path="/var/lib/kubelet/pods/8712d082-0189-42d4-ba1e-5ce82fa630fb/volumes"
Jan 29 14:14:28 crc kubenswrapper[4787]: I0129 14:14:28.394362 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 14:14:28 crc kubenswrapper[4787]: I0129 14:14:28.395086 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 14:14:58 crc kubenswrapper[4787]: I0129 14:14:58.394414 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 14:14:58 crc kubenswrapper[4787]: I0129 14:14:58.395234 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.175250 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"]
Jan 29 14:15:00 crc kubenswrapper[4787]: E0129 14:15:00.176066 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerName="extract-content"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.176089 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerName="extract-content"
Jan 29 14:15:00 crc kubenswrapper[4787]: E0129 14:15:00.176109 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerName="extract-utilities"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.176121 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerName="extract-utilities"
Jan 29 14:15:00 crc kubenswrapper[4787]: E0129 14:15:00.176134 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerName="registry-server"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.176144 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerName="registry-server"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.176379 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="8712d082-0189-42d4-ba1e-5ce82fa630fb" containerName="registry-server"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.178074 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.181728 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.182855 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.197154 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"]
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.331446 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wmlc\" (UniqueName: \"kubernetes.io/projected/7abc4004-36f4-41cf-8dde-ab161078974e-kube-api-access-8wmlc\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.331583 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7abc4004-36f4-41cf-8dde-ab161078974e-config-volume\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.331704 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7abc4004-36f4-41cf-8dde-ab161078974e-secret-volume\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.432871 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wmlc\" (UniqueName: \"kubernetes.io/projected/7abc4004-36f4-41cf-8dde-ab161078974e-kube-api-access-8wmlc\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.433489 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7abc4004-36f4-41cf-8dde-ab161078974e-config-volume\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.433744 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7abc4004-36f4-41cf-8dde-ab161078974e-secret-volume\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.435015 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7abc4004-36f4-41cf-8dde-ab161078974e-config-volume\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.450180 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7abc4004-36f4-41cf-8dde-ab161078974e-secret-volume\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.463388 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wmlc\" (UniqueName: \"kubernetes.io/projected/7abc4004-36f4-41cf-8dde-ab161078974e-kube-api-access-8wmlc\") pod \"collect-profiles-29494935-w9c5n\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:00 crc kubenswrapper[4787]: I0129 14:15:00.500310 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:01 crc kubenswrapper[4787]: I0129 14:15:01.002124 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"]
Jan 29 14:15:01 crc kubenswrapper[4787]: I0129 14:15:01.866592 4787 generic.go:334] "Generic (PLEG): container finished" podID="7abc4004-36f4-41cf-8dde-ab161078974e" containerID="93f4614b8631fb1f4dcdb2bbe9c3bdaf8b7f0decb495b66b15fc374685c1bc97" exitCode=0
Jan 29 14:15:01 crc kubenswrapper[4787]: I0129 14:15:01.866662 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n" event={"ID":"7abc4004-36f4-41cf-8dde-ab161078974e","Type":"ContainerDied","Data":"93f4614b8631fb1f4dcdb2bbe9c3bdaf8b7f0decb495b66b15fc374685c1bc97"}
Jan 29 14:15:01 crc kubenswrapper[4787]: I0129 14:15:01.866700 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n" event={"ID":"7abc4004-36f4-41cf-8dde-ab161078974e","Type":"ContainerStarted","Data":"c0fac998814ca8c8da741e73edc82e993074b91dde04a3d3e09c59b6330d8a39"}
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.163750 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.302546 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7abc4004-36f4-41cf-8dde-ab161078974e-secret-volume\") pod \"7abc4004-36f4-41cf-8dde-ab161078974e\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") "
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.302707 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7abc4004-36f4-41cf-8dde-ab161078974e-config-volume\") pod \"7abc4004-36f4-41cf-8dde-ab161078974e\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") "
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.302755 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wmlc\" (UniqueName: \"kubernetes.io/projected/7abc4004-36f4-41cf-8dde-ab161078974e-kube-api-access-8wmlc\") pod \"7abc4004-36f4-41cf-8dde-ab161078974e\" (UID: \"7abc4004-36f4-41cf-8dde-ab161078974e\") "
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.304694 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7abc4004-36f4-41cf-8dde-ab161078974e-config-volume" (OuterVolumeSpecName: "config-volume") pod "7abc4004-36f4-41cf-8dde-ab161078974e" (UID: "7abc4004-36f4-41cf-8dde-ab161078974e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.311279 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7abc4004-36f4-41cf-8dde-ab161078974e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7abc4004-36f4-41cf-8dde-ab161078974e" (UID: "7abc4004-36f4-41cf-8dde-ab161078974e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.311586 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7abc4004-36f4-41cf-8dde-ab161078974e-kube-api-access-8wmlc" (OuterVolumeSpecName: "kube-api-access-8wmlc") pod "7abc4004-36f4-41cf-8dde-ab161078974e" (UID: "7abc4004-36f4-41cf-8dde-ab161078974e"). InnerVolumeSpecName "kube-api-access-8wmlc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.404670 4787 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7abc4004-36f4-41cf-8dde-ab161078974e-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.404705 4787 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7abc4004-36f4-41cf-8dde-ab161078974e-config-volume\") on node \"crc\" DevicePath \"\""
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.404717 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wmlc\" (UniqueName: \"kubernetes.io/projected/7abc4004-36f4-41cf-8dde-ab161078974e-kube-api-access-8wmlc\") on node \"crc\" DevicePath \"\""
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.885503 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n" event={"ID":"7abc4004-36f4-41cf-8dde-ab161078974e","Type":"ContainerDied","Data":"c0fac998814ca8c8da741e73edc82e993074b91dde04a3d3e09c59b6330d8a39"}
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.885558 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0fac998814ca8c8da741e73edc82e993074b91dde04a3d3e09c59b6330d8a39"
Jan 29 14:15:03 crc kubenswrapper[4787]: I0129 14:15:03.885616 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494935-w9c5n"
Jan 29 14:15:04 crc kubenswrapper[4787]: I0129 14:15:04.249593 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k"]
Jan 29 14:15:04 crc kubenswrapper[4787]: I0129 14:15:04.255774 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494890-wjm2k"]
Jan 29 14:15:06 crc kubenswrapper[4787]: I0129 14:15:06.013155 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24c891d4-ad25-431d-a360-f23ac5d82a73" path="/var/lib/kubelet/pods/24c891d4-ad25-431d-a360-f23ac5d82a73/volumes"
Jan 29 14:15:24 crc kubenswrapper[4787]: I0129 14:15:24.515407 4787 scope.go:117] "RemoveContainer" containerID="d8f366b8ede352cb860c23469298afd1bbd9047e2a69b72f2cb1f1d78b583a14"
Jan 29 14:15:28 crc kubenswrapper[4787]: I0129 14:15:28.394807 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 14:15:28 crc kubenswrapper[4787]: I0129 14:15:28.395541 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 14:15:28 crc kubenswrapper[4787]: I0129 14:15:28.395617 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn"
Jan 29 14:15:28 crc kubenswrapper[4787]: I0129 14:15:28.396885 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 14:15:28 crc kubenswrapper[4787]: I0129 14:15:28.397077 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2" gracePeriod=600
Jan 29 14:15:28 crc kubenswrapper[4787]: E0129 14:15:28.527747 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:15:29 crc kubenswrapper[4787]: I0129 14:15:29.115109 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2" exitCode=0
Jan 29 14:15:29 crc kubenswrapper[4787]: I0129 14:15:29.115171 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"}
Jan 29 14:15:29 crc kubenswrapper[4787]: I0129 14:15:29.115217 4787 scope.go:117] "RemoveContainer" containerID="0f3a484dfaa6d61994bfc61d06cf97d51169ba0f46f41c05b2a5a7e6f5765f18"
Jan 29 14:15:29 crc kubenswrapper[4787]: I0129 14:15:29.115733 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:15:29 crc kubenswrapper[4787]: E0129 14:15:29.116200 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:15:39 crc kubenswrapper[4787]: I0129 14:15:39.986912 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:15:39 crc kubenswrapper[4787]: E0129 14:15:39.987910 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:15:51 crc kubenswrapper[4787]: I0129 14:15:51.993952 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:15:51 crc kubenswrapper[4787]: E0129 14:15:51.995189 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:16:03 crc kubenswrapper[4787]: I0129 14:16:03.986450 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:16:03 crc kubenswrapper[4787]: E0129 14:16:03.987089 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:16:16 crc kubenswrapper[4787]: I0129 14:16:16.985842 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:16:16 crc kubenswrapper[4787]: E0129 14:16:16.986552 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:16:28 crc kubenswrapper[4787]: I0129 14:16:28.985592 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:16:28 crc kubenswrapper[4787]: E0129 14:16:28.986530 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:16:42 crc kubenswrapper[4787]: I0129 14:16:42.985559 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:16:42 crc kubenswrapper[4787]: E0129 14:16:42.986340 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:16:57 crc kubenswrapper[4787]: I0129 14:16:57.985906 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:16:57 crc kubenswrapper[4787]: E0129 14:16:57.987213 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:17:12 crc kubenswrapper[4787]: I0129 14:17:12.985550 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:17:12 crc kubenswrapper[4787]: E0129 14:17:12.986673 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:17:27 crc kubenswrapper[4787]: I0129 14:17:27.986270 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:17:27 crc kubenswrapper[4787]: E0129 14:17:27.987291 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:17:39 crc kubenswrapper[4787]: I0129 14:17:39.986085 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:17:39 crc kubenswrapper[4787]: E0129 14:17:39.987063 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:17:50 crc kubenswrapper[4787]: I0129 14:17:50.986627 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:17:50 crc kubenswrapper[4787]: E0129 14:17:50.987236 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:18:05 crc kubenswrapper[4787]: I0129 14:18:05.986084 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:18:05 crc kubenswrapper[4787]: E0129 14:18:05.987120 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:18:16 crc kubenswrapper[4787]: I0129 14:18:16.986156 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:18:16 crc kubenswrapper[4787]: E0129 14:18:16.986879 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:18:31 crc kubenswrapper[4787]: I0129 14:18:31.993889 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:18:31 crc kubenswrapper[4787]: E0129 14:18:31.995243 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:18:43 crc kubenswrapper[4787]: I0129 14:18:43.985907 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:18:43 crc kubenswrapper[4787]: E0129 14:18:43.987090 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:18:56 crc kubenswrapper[4787]: I0129 14:18:56.985720 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:18:56 crc kubenswrapper[4787]: E0129 14:18:56.986932 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:19:08 crc kubenswrapper[4787]: I0129 14:19:08.986656 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:19:08 crc kubenswrapper[4787]: E0129 14:19:08.987680 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:19:23 crc kubenswrapper[4787]: I0129 14:19:23.985646 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:19:23 crc kubenswrapper[4787]: E0129 14:19:23.988393 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:19:38 crc kubenswrapper[4787]: I0129 14:19:38.985516 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:19:38 crc kubenswrapper[4787]: E0129 14:19:38.986239 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:19:50 crc kubenswrapper[4787]: I0129 14:19:50.985906 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:19:50 crc kubenswrapper[4787]: E0129 14:19:50.986771 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:20:03 crc kubenswrapper[4787]: I0129 14:20:03.985896 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:20:03 crc kubenswrapper[4787]: E0129 14:20:03.987125 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:20:15 crc kubenswrapper[4787]: I0129 14:20:15.986729 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:20:15 crc kubenswrapper[4787]: E0129 14:20:15.987871 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:20:29 crc kubenswrapper[4787]: I0129 14:20:29.986933 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2"
Jan 29 14:20:30 crc kubenswrapper[4787]: I0129 14:20:30.862357 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"3ac1c87b91aecfeb0a0ff58d43369e656c79f643b3b5620facbc8db1354d7bbf"}
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.367757 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qll5d"]
Jan 29 14:22:01 crc kubenswrapper[4787]: E0129 14:22:01.368738 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7abc4004-36f4-41cf-8dde-ab161078974e" containerName="collect-profiles"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.368754 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="7abc4004-36f4-41cf-8dde-ab161078974e" containerName="collect-profiles"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.368919 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="7abc4004-36f4-41cf-8dde-ab161078974e" containerName="collect-profiles"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.374529 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.376822 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qll5d"]
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.444086 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-utilities\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.444196 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-catalog-content\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.444231 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjjkf\" (UniqueName: \"kubernetes.io/projected/d3d0cdad-308b-41cd-aa71-2adcd316e994-kube-api-access-jjjkf\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.546009 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjjkf\" (UniqueName: \"kubernetes.io/projected/d3d0cdad-308b-41cd-aa71-2adcd316e994-kube-api-access-jjjkf\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.546106 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-utilities\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.546150 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-catalog-content\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.546635 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-catalog-content\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.546778 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-utilities\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.571124 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjjkf\" (UniqueName: \"kubernetes.io/projected/d3d0cdad-308b-41cd-aa71-2adcd316e994-kube-api-access-jjjkf\") pod \"certified-operators-qll5d\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") " pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:01 crc kubenswrapper[4787]: I0129 14:22:01.694368 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:02 crc kubenswrapper[4787]: I0129 14:22:02.005218 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qll5d"]
Jan 29 14:22:02 crc kubenswrapper[4787]: I0129 14:22:02.600206 4787 generic.go:334] "Generic (PLEG): container finished" podID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerID="dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead" exitCode=0
Jan 29 14:22:02 crc kubenswrapper[4787]: I0129 14:22:02.600317 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qll5d" event={"ID":"d3d0cdad-308b-41cd-aa71-2adcd316e994","Type":"ContainerDied","Data":"dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead"}
Jan 29 14:22:02 crc kubenswrapper[4787]: I0129 14:22:02.600613 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qll5d" event={"ID":"d3d0cdad-308b-41cd-aa71-2adcd316e994","Type":"ContainerStarted","Data":"dc2708e580c981ed7d327f3110260e96a68849afeaf1a3837b0627eae4a013dd"}
Jan 29 14:22:02 crc kubenswrapper[4787]: I0129 14:22:02.603829 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 14:22:03 crc kubenswrapper[4787]: I0129 14:22:03.612242 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qll5d" event={"ID":"d3d0cdad-308b-41cd-aa71-2adcd316e994","Type":"ContainerStarted","Data":"3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46"}
Jan 29 14:22:04 crc kubenswrapper[4787]: I0129 14:22:04.624848 4787 generic.go:334] "Generic (PLEG): container finished" podID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerID="3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46" exitCode=0
Jan 29 14:22:04 crc kubenswrapper[4787]: I0129 14:22:04.624913 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qll5d" event={"ID":"d3d0cdad-308b-41cd-aa71-2adcd316e994","Type":"ContainerDied","Data":"3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46"}
Jan 29 14:22:05 crc kubenswrapper[4787]: I0129 14:22:05.638797 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qll5d" event={"ID":"d3d0cdad-308b-41cd-aa71-2adcd316e994","Type":"ContainerStarted","Data":"965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd"}
Jan 29 14:22:05 crc kubenswrapper[4787]: I0129 14:22:05.662108 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qll5d" podStartSLOduration=2.25139084 podStartE2EDuration="4.662091568s" podCreationTimestamp="2026-01-29 14:22:01 +0000 UTC" firstStartedPulling="2026-01-29 14:22:02.603178873 +0000 UTC m=+3961.364439189" lastFinishedPulling="2026-01-29 14:22:05.013879631 +0000 UTC m=+3963.775139917" observedRunningTime="2026-01-29 14:22:05.655973255 +0000 UTC m=+3964.417233531" watchObservedRunningTime="2026-01-29 14:22:05.662091568 +0000 UTC m=+3964.423351844"
Jan 29 14:22:11 crc kubenswrapper[4787]: I0129 14:22:11.695376 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:11 crc kubenswrapper[4787]: I0129 14:22:11.695998 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:11 crc kubenswrapper[4787]: I0129 14:22:11.760443 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:12 crc kubenswrapper[4787]: I0129 14:22:12.735250 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:12 crc kubenswrapper[4787]: I0129 14:22:12.774476 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qll5d"]
Jan 29 14:22:14 crc kubenswrapper[4787]: I0129 14:22:14.711355 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qll5d" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerName="registry-server" containerID="cri-o://965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd" gracePeriod=2
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.287563 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.380962 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-utilities\") pod \"d3d0cdad-308b-41cd-aa71-2adcd316e994\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") "
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.381284 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjjkf\" (UniqueName: \"kubernetes.io/projected/d3d0cdad-308b-41cd-aa71-2adcd316e994-kube-api-access-jjjkf\") pod \"d3d0cdad-308b-41cd-aa71-2adcd316e994\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") "
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.381429 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-catalog-content\") pod \"d3d0cdad-308b-41cd-aa71-2adcd316e994\" (UID: \"d3d0cdad-308b-41cd-aa71-2adcd316e994\") "
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.382102 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-utilities" (OuterVolumeSpecName: "utilities") pod "d3d0cdad-308b-41cd-aa71-2adcd316e994" (UID: "d3d0cdad-308b-41cd-aa71-2adcd316e994"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.388695 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3d0cdad-308b-41cd-aa71-2adcd316e994-kube-api-access-jjjkf" (OuterVolumeSpecName: "kube-api-access-jjjkf") pod "d3d0cdad-308b-41cd-aa71-2adcd316e994" (UID: "d3d0cdad-308b-41cd-aa71-2adcd316e994"). InnerVolumeSpecName "kube-api-access-jjjkf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.437363 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d3d0cdad-308b-41cd-aa71-2adcd316e994" (UID: "d3d0cdad-308b-41cd-aa71-2adcd316e994"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.483513 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.483545 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjjkf\" (UniqueName: \"kubernetes.io/projected/d3d0cdad-308b-41cd-aa71-2adcd316e994-kube-api-access-jjjkf\") on node \"crc\" DevicePath \"\""
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.483558 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d3d0cdad-308b-41cd-aa71-2adcd316e994-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.722616 4787 generic.go:334] "Generic (PLEG): container finished" podID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerID="965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd" exitCode=0
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.722726 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qll5d"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.722696 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qll5d" event={"ID":"d3d0cdad-308b-41cd-aa71-2adcd316e994","Type":"ContainerDied","Data":"965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd"}
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.723162 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qll5d" event={"ID":"d3d0cdad-308b-41cd-aa71-2adcd316e994","Type":"ContainerDied","Data":"dc2708e580c981ed7d327f3110260e96a68849afeaf1a3837b0627eae4a013dd"}
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.723197 4787 scope.go:117] "RemoveContainer" containerID="965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.757062 4787 scope.go:117] "RemoveContainer" containerID="3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.770181 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qll5d"]
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.777903 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qll5d"]
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.783371 4787 scope.go:117] "RemoveContainer" containerID="dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.819310 4787 scope.go:117] "RemoveContainer" containerID="965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd"
Jan 29 14:22:15 crc kubenswrapper[4787]: E0129 14:22:15.820185 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd\": container with ID starting with 965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd not found: ID does not exist" containerID="965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.820250 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd"} err="failed to get container status \"965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd\": rpc error: code = NotFound desc = could not find container \"965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd\": container with ID starting with 965a43b723a4b8c12cb8b026f5065bc9340055fc132e58b6063bc92f6c4f7fdd not found: ID does not exist"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.820290 4787 scope.go:117] "RemoveContainer" containerID="3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46"
Jan 29 14:22:15 crc kubenswrapper[4787]: E0129 14:22:15.820742 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46\": container with ID starting with 3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46 not found: ID does not exist" containerID="3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.820803 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46"} err="failed to get container status \"3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46\": rpc error: code = NotFound desc = could not find container \"3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46\": container with ID starting with 3e2897d117625d268c570572192dd29809c16fee7afa4a5813723d383bb10f46 not found: ID does not exist"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.820842 4787 scope.go:117] "RemoveContainer" containerID="dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead"
Jan 29 14:22:15 crc kubenswrapper[4787]: E0129 14:22:15.821168 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead\": container with ID starting with dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead not found: ID does not exist" containerID="dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead"
Jan 29 14:22:15 crc kubenswrapper[4787]: I0129 14:22:15.821197 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead"} err="failed to get container status \"dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead\": rpc error: code = NotFound desc = could not find container \"dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead\": container with ID starting with dd46f19f029b0896525d64ed5f581856b9ed40c67d4ffdc382399eaebe1ccead not found: ID does not exist"
Jan 29 14:22:16 crc kubenswrapper[4787]: I0129 14:22:16.005275 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" path="/var/lib/kubelet/pods/d3d0cdad-308b-41cd-aa71-2adcd316e994/volumes"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.198847 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mfrws"]
Jan 29 14:22:21 crc kubenswrapper[4787]: E0129 14:22:21.199958 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerName="registry-server"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.199980 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerName="registry-server"
Jan 29 14:22:21 crc kubenswrapper[4787]: E0129 14:22:21.200010 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerName="extract-utilities"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.200023 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerName="extract-utilities"
Jan 29 14:22:21 crc kubenswrapper[4787]: E0129 14:22:21.200042 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerName="extract-content"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.200053 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerName="extract-content"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.200298 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3d0cdad-308b-41cd-aa71-2adcd316e994" containerName="registry-server"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.201967 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.208283 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2ksd\" (UniqueName: \"kubernetes.io/projected/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-kube-api-access-c2ksd\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.208345 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-utilities\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.208388 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-catalog-content\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.208742 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mfrws"]
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.309286 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-catalog-content\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.309418 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2ksd\" (UniqueName: \"kubernetes.io/projected/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-kube-api-access-c2ksd\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.309449 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-utilities\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.309933 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-catalog-content\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.309974 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-utilities\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.576249 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2ksd\" (UniqueName: \"kubernetes.io/projected/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-kube-api-access-c2ksd\") pod \"community-operators-mfrws\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") " pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:21 crc kubenswrapper[4787]: I0129 14:22:21.869006 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:22 crc kubenswrapper[4787]: I0129 14:22:22.451775 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mfrws"]
Jan 29 14:22:22 crc kubenswrapper[4787]: I0129 14:22:22.787563 4787 generic.go:334] "Generic (PLEG): container finished" podID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerID="6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708" exitCode=0
Jan 29 14:22:22 crc kubenswrapper[4787]: I0129 14:22:22.787617 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfrws" event={"ID":"373b80dd-de6a-4b2c-b27d-3a0eb0446b36","Type":"ContainerDied","Data":"6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708"}
Jan 29 14:22:22 crc kubenswrapper[4787]: I0129 14:22:22.787648 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfrws" event={"ID":"373b80dd-de6a-4b2c-b27d-3a0eb0446b36","Type":"ContainerStarted","Data":"63ef14a5c6385f0bf834037c543af8d963e1f920995e0f3df4e0c655e670f0e4"}
Jan 29 14:22:24 crc kubenswrapper[4787]: I0129 14:22:24.815929 4787 generic.go:334] "Generic (PLEG): container finished" podID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerID="fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00" exitCode=0
Jan 29 14:22:24 crc kubenswrapper[4787]: I0129 14:22:24.816025 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfrws" event={"ID":"373b80dd-de6a-4b2c-b27d-3a0eb0446b36","Type":"ContainerDied","Data":"fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00"}
Jan 29 14:22:25 crc kubenswrapper[4787]: I0129 14:22:25.826984 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfrws" event={"ID":"373b80dd-de6a-4b2c-b27d-3a0eb0446b36","Type":"ContainerStarted","Data":"00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139"}
Jan 29 14:22:25 crc kubenswrapper[4787]: I0129 14:22:25.861313 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mfrws" podStartSLOduration=2.366235896 podStartE2EDuration="4.861294029s" podCreationTimestamp="2026-01-29 14:22:21 +0000 UTC" firstStartedPulling="2026-01-29 14:22:22.790084037 +0000 UTC m=+3981.551344333" lastFinishedPulling="2026-01-29 14:22:25.28514218 +0000 UTC m=+3984.046402466" observedRunningTime="2026-01-29 14:22:25.855533656 +0000 UTC m=+3984.616793942" watchObservedRunningTime="2026-01-29 14:22:25.861294029 +0000 UTC m=+3984.622554305"
Jan 29 14:22:31 crc kubenswrapper[4787]: I0129 14:22:31.869387 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:31 crc kubenswrapper[4787]: I0129 14:22:31.870070 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:31 crc kubenswrapper[4787]: I0129 14:22:31.941130 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:32 crc kubenswrapper[4787]: I0129 14:22:32.914761 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:32 crc kubenswrapper[4787]: I0129 14:22:32.966596 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mfrws"]
Jan 29 14:22:34 crc kubenswrapper[4787]: I0129 14:22:34.891860 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mfrws" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerName="registry-server" containerID="cri-o://00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139" gracePeriod=2
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.809136 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.899519 4787 generic.go:334] "Generic (PLEG): container finished" podID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerID="00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139" exitCode=0
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.899556 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mfrws"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.900556 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfrws" event={"ID":"373b80dd-de6a-4b2c-b27d-3a0eb0446b36","Type":"ContainerDied","Data":"00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139"}
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.900604 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mfrws" event={"ID":"373b80dd-de6a-4b2c-b27d-3a0eb0446b36","Type":"ContainerDied","Data":"63ef14a5c6385f0bf834037c543af8d963e1f920995e0f3df4e0c655e670f0e4"}
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.900626 4787 scope.go:117] "RemoveContainer" containerID="00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.919313 4787 scope.go:117] "RemoveContainer" containerID="fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.936421 4787 scope.go:117] "RemoveContainer" containerID="6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.962154 4787 scope.go:117] "RemoveContainer" containerID="00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139"
Jan 29 14:22:35 crc kubenswrapper[4787]: E0129 14:22:35.962588 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139\": container with ID starting with 00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139 not found: ID does not exist" containerID="00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.962620 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139"} err="failed to get container status \"00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139\": rpc error: code = NotFound desc = could not find container \"00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139\": container with ID starting with 00cb770a1fa851653ae601174e29d18d1052ea3c22dc6eb07dee234f05e1d139 not found: ID does not exist"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.962640 4787 scope.go:117] "RemoveContainer" containerID="fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00"
Jan 29 14:22:35 crc kubenswrapper[4787]: E0129 14:22:35.962944 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00\": container with ID starting with fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00 not found: ID does not exist" containerID="fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.962965 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00"} err="failed to get container status \"fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00\": rpc error: code = NotFound desc = could not find container \"fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00\": container with ID starting with fe9d864486df8f88e651cff1293be5dcf0c7ebc324978a11172c34bec0fa3e00 not found: ID does not exist"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.962979 4787 scope.go:117] "RemoveContainer" containerID="6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708"
Jan 29 14:22:35 crc kubenswrapper[4787]: E0129 14:22:35.963219 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708\": container with ID starting with 6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708 not found: ID does not exist" containerID="6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.963239 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708"} err="failed to get container status \"6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708\": rpc error: code = NotFound desc = could not find container \"6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708\": container with ID starting with 6177049c4e06051fb11004605366145556785c082a97fce80ab87be9f82d3708 not found: ID does not exist"
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.968149 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-utilities\") pod \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") "
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.968189 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2ksd\" (UniqueName: \"kubernetes.io/projected/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-kube-api-access-c2ksd\") pod \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") "
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129 14:22:35.968296 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-catalog-content\") pod \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\" (UID: \"373b80dd-de6a-4b2c-b27d-3a0eb0446b36\") "
Jan 29 14:22:35 crc kubenswrapper[4787]: I0129
14:22:35.972593 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-utilities" (OuterVolumeSpecName: "utilities") pod "373b80dd-de6a-4b2c-b27d-3a0eb0446b36" (UID: "373b80dd-de6a-4b2c-b27d-3a0eb0446b36"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:22:36 crc kubenswrapper[4787]: I0129 14:22:35.996421 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-kube-api-access-c2ksd" (OuterVolumeSpecName: "kube-api-access-c2ksd") pod "373b80dd-de6a-4b2c-b27d-3a0eb0446b36" (UID: "373b80dd-de6a-4b2c-b27d-3a0eb0446b36"). InnerVolumeSpecName "kube-api-access-c2ksd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:22:36 crc kubenswrapper[4787]: I0129 14:22:36.046277 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "373b80dd-de6a-4b2c-b27d-3a0eb0446b36" (UID: "373b80dd-de6a-4b2c-b27d-3a0eb0446b36"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:22:36 crc kubenswrapper[4787]: I0129 14:22:36.069628 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:22:36 crc kubenswrapper[4787]: I0129 14:22:36.069662 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:22:36 crc kubenswrapper[4787]: I0129 14:22:36.069692 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2ksd\" (UniqueName: \"kubernetes.io/projected/373b80dd-de6a-4b2c-b27d-3a0eb0446b36-kube-api-access-c2ksd\") on node \"crc\" DevicePath \"\"" Jan 29 14:22:36 crc kubenswrapper[4787]: I0129 14:22:36.237194 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mfrws"] Jan 29 14:22:36 crc kubenswrapper[4787]: I0129 14:22:36.244436 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mfrws"] Jan 29 14:22:37 crc kubenswrapper[4787]: I0129 14:22:37.994153 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" path="/var/lib/kubelet/pods/373b80dd-de6a-4b2c-b27d-3a0eb0446b36/volumes" Jan 29 14:22:58 crc kubenswrapper[4787]: I0129 14:22:58.394937 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:22:58 crc kubenswrapper[4787]: I0129 14:22:58.395374 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.681292 4787 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-vdjfd"] Jan 29 14:23:27 crc kubenswrapper[4787]: E0129 14:23:27.682423 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerName="extract-utilities" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.682445 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerName="extract-utilities" Jan 29 14:23:27 crc kubenswrapper[4787]: E0129 14:23:27.682495 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerName="registry-server" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.682509 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerName="registry-server" Jan 29 14:23:27 crc kubenswrapper[4787]: E0129 14:23:27.682542 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerName="extract-content" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.682554 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerName="extract-content" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.682781 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="373b80dd-de6a-4b2c-b27d-3a0eb0446b36" containerName="registry-server" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.684117 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.693829 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vdjfd"] Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.862239 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-utilities\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.862302 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmsm6\" (UniqueName: \"kubernetes.io/projected/57ba73f7-1eff-4792-aba2-7dc5e9b60814-kube-api-access-kmsm6\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.862564 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-catalog-content\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.963405 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-catalog-content\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.963520 4787 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-utilities\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.963550 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmsm6\" (UniqueName: \"kubernetes.io/projected/57ba73f7-1eff-4792-aba2-7dc5e9b60814-kube-api-access-kmsm6\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.964084 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-catalog-content\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:27 crc kubenswrapper[4787]: I0129 14:23:27.964092 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-utilities\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:28 crc kubenswrapper[4787]: I0129 14:23:28.278168 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmsm6\" (UniqueName: \"kubernetes.io/projected/57ba73f7-1eff-4792-aba2-7dc5e9b60814-kube-api-access-kmsm6\") pod \"redhat-operators-vdjfd\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:28 crc kubenswrapper[4787]: I0129 14:23:28.303664 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:28 crc kubenswrapper[4787]: I0129 14:23:28.393900 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:23:28 crc kubenswrapper[4787]: I0129 14:23:28.393948 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:23:28 crc kubenswrapper[4787]: I0129 14:23:28.736134 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vdjfd"] Jan 29 14:23:29 crc kubenswrapper[4787]: I0129 14:23:29.328147 4787 generic.go:334] "Generic (PLEG): container finished" podID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerID="8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e" exitCode=0 Jan 29 14:23:29 crc kubenswrapper[4787]: I0129 14:23:29.328225 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vdjfd" event={"ID":"57ba73f7-1eff-4792-aba2-7dc5e9b60814","Type":"ContainerDied","Data":"8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e"} Jan 29 14:23:29 crc kubenswrapper[4787]: I0129 14:23:29.328471 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vdjfd" event={"ID":"57ba73f7-1eff-4792-aba2-7dc5e9b60814","Type":"ContainerStarted","Data":"96f3d5b92b54373b9f36c5afd46e7be1399b71016421d7d6fd68611e15937fd0"} Jan 29 14:23:30 crc kubenswrapper[4787]: I0129 14:23:30.335993 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vdjfd" event={"ID":"57ba73f7-1eff-4792-aba2-7dc5e9b60814","Type":"ContainerStarted","Data":"623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617"} Jan 29 14:23:31 crc kubenswrapper[4787]: I0129 14:23:31.350154 4787 generic.go:334] "Generic (PLEG): container finished" podID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerID="623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617" exitCode=0 Jan 29 14:23:31 crc kubenswrapper[4787]: I0129 14:23:31.350216 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vdjfd" event={"ID":"57ba73f7-1eff-4792-aba2-7dc5e9b60814","Type":"ContainerDied","Data":"623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617"} Jan 29 14:23:32 crc kubenswrapper[4787]: I0129 14:23:32.361521 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vdjfd" event={"ID":"57ba73f7-1eff-4792-aba2-7dc5e9b60814","Type":"ContainerStarted","Data":"5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788"} Jan 29 14:23:32 crc kubenswrapper[4787]: I0129 14:23:32.382941 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vdjfd" podStartSLOduration=2.915399562 podStartE2EDuration="5.382914016s" podCreationTimestamp="2026-01-29 14:23:27 +0000 UTC" firstStartedPulling="2026-01-29 14:23:29.330974579 +0000 UTC m=+4048.092234895" lastFinishedPulling="2026-01-29 14:23:31.798489073 +0000 UTC 
m=+4050.559749349" observedRunningTime="2026-01-29 14:23:32.377523764 +0000 UTC m=+4051.138784050" watchObservedRunningTime="2026-01-29 14:23:32.382914016 +0000 UTC m=+4051.144174292" Jan 29 14:23:38 crc kubenswrapper[4787]: I0129 14:23:38.304676 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:38 crc kubenswrapper[4787]: I0129 14:23:38.305083 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:39 crc kubenswrapper[4787]: I0129 14:23:39.357589 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vdjfd" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="registry-server" probeResult="failure" output=< Jan 29 14:23:39 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s Jan 29 14:23:39 crc kubenswrapper[4787]: > Jan 29 14:23:48 crc kubenswrapper[4787]: I0129 14:23:48.380775 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:48 crc kubenswrapper[4787]: I0129 14:23:48.438293 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:48 crc kubenswrapper[4787]: I0129 14:23:48.632819 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vdjfd"] Jan 29 14:23:49 crc kubenswrapper[4787]: I0129 14:23:49.513004 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vdjfd" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="registry-server" containerID="cri-o://5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788" gracePeriod=2 Jan 29 14:23:49 crc kubenswrapper[4787]: I0129 14:23:49.992868 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.004896 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-utilities\") pod \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.004983 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-catalog-content\") pod \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.005026 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmsm6\" (UniqueName: \"kubernetes.io/projected/57ba73f7-1eff-4792-aba2-7dc5e9b60814-kube-api-access-kmsm6\") pod \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\" (UID: \"57ba73f7-1eff-4792-aba2-7dc5e9b60814\") " Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.005702 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-utilities" (OuterVolumeSpecName: "utilities") pod "57ba73f7-1eff-4792-aba2-7dc5e9b60814" (UID: "57ba73f7-1eff-4792-aba2-7dc5e9b60814"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.016936 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57ba73f7-1eff-4792-aba2-7dc5e9b60814-kube-api-access-kmsm6" (OuterVolumeSpecName: "kube-api-access-kmsm6") pod "57ba73f7-1eff-4792-aba2-7dc5e9b60814" (UID: "57ba73f7-1eff-4792-aba2-7dc5e9b60814"). InnerVolumeSpecName "kube-api-access-kmsm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.106739 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.106781 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmsm6\" (UniqueName: \"kubernetes.io/projected/57ba73f7-1eff-4792-aba2-7dc5e9b60814-kube-api-access-kmsm6\") on node \"crc\" DevicePath \"\"" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.130907 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57ba73f7-1eff-4792-aba2-7dc5e9b60814" (UID: "57ba73f7-1eff-4792-aba2-7dc5e9b60814"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.207639 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57ba73f7-1eff-4792-aba2-7dc5e9b60814-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.527337 4787 generic.go:334] "Generic (PLEG): container finished" podID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerID="5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788" exitCode=0 Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.527408 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vdjfd" event={"ID":"57ba73f7-1eff-4792-aba2-7dc5e9b60814","Type":"ContainerDied","Data":"5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788"} Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.527541 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vdjfd" event={"ID":"57ba73f7-1eff-4792-aba2-7dc5e9b60814","Type":"ContainerDied","Data":"96f3d5b92b54373b9f36c5afd46e7be1399b71016421d7d6fd68611e15937fd0"} Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.527594 4787 scope.go:117] "RemoveContainer" containerID="5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.527892 4787 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vdjfd" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.569949 4787 scope.go:117] "RemoveContainer" containerID="623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.588487 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vdjfd"] Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.598118 4787 scope.go:117] "RemoveContainer" containerID="8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.598148 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vdjfd"] Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.644950 4787 scope.go:117] "RemoveContainer" containerID="5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788" Jan 29 14:23:50 crc kubenswrapper[4787]: E0129 14:23:50.645647 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788\": container with ID starting with 5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788 not found: ID does not exist" containerID="5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.645781 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788"} err="failed to get container status \"5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788\": rpc error: code = NotFound desc = could not find container \"5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788\": container with ID starting with 5c4df72459feaeb910077b87bced2296b7f4fb522f3d07ab4931cc6e3148a788 not found: ID does not exist" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.645901 4787 scope.go:117] "RemoveContainer" containerID="623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617" Jan 29 14:23:50 crc kubenswrapper[4787]: E0129 14:23:50.646352 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617\": container with ID starting with 623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617 not found: ID does not exist" containerID="623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.646416 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617"} err="failed to get container status \"623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617\": rpc error: code = NotFound desc = could not find container \"623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617\": container with ID starting with 623a9f3ed46ea29994cc4d3d1e02705bf182f56324a3ca90243b772fd25e6617 not found: ID does not exist" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.646484 4787 scope.go:117] "RemoveContainer" containerID="8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e" Jan 29 14:23:50 crc kubenswrapper[4787]: E0129 14:23:50.646977 4787 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e\": container with ID starting with 8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e not found: ID does not exist" containerID="8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e" Jan 29 14:23:50 crc kubenswrapper[4787]: I0129 14:23:50.647020 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e"} err="failed to get container status \"8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e\": rpc error: code = NotFound desc = could not find container \"8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e\": container with ID starting with 8e6a2bf5971457fa663c9517d61f38e2e75e01d1d83eab3136f3d50afbf6104e not found: ID does not exist" Jan 29 14:23:52 crc kubenswrapper[4787]: I0129 14:23:52.004817 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" path="/var/lib/kubelet/pods/57ba73f7-1eff-4792-aba2-7dc5e9b60814/volumes" Jan 29 14:23:58 crc kubenswrapper[4787]: I0129 14:23:58.394538 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:23:58 crc kubenswrapper[4787]: I0129 14:23:58.394652 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:23:58 crc kubenswrapper[4787]: I0129 14:23:58.394730 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 14:23:58 crc kubenswrapper[4787]: I0129 14:23:58.395739 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3ac1c87b91aecfeb0a0ff58d43369e656c79f643b3b5620facbc8db1354d7bbf"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 14:23:58 crc kubenswrapper[4787]: I0129 14:23:58.395869 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://3ac1c87b91aecfeb0a0ff58d43369e656c79f643b3b5620facbc8db1354d7bbf" gracePeriod=600 Jan 29 14:23:58 crc kubenswrapper[4787]: I0129 14:23:58.601221 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="3ac1c87b91aecfeb0a0ff58d43369e656c79f643b3b5620facbc8db1354d7bbf" exitCode=0 Jan 29 14:23:58 crc kubenswrapper[4787]: I0129 14:23:58.601287 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"3ac1c87b91aecfeb0a0ff58d43369e656c79f643b3b5620facbc8db1354d7bbf"} 
Jan 29 14:23:58 crc kubenswrapper[4787]: I0129 14:23:58.601723 4787 scope.go:117] "RemoveContainer" containerID="1450c76797b579f782c4fe31c152f9ca9db5228839671e508d7baaab0206d2c2" Jan 29 14:23:59 crc kubenswrapper[4787]: I0129 14:23:59.615873 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"} Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.062511 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b6m5l"] Jan 29 14:24:03 crc kubenswrapper[4787]: E0129 14:24:03.063811 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="registry-server" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.063834 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="registry-server" Jan 29 14:24:03 crc kubenswrapper[4787]: E0129 14:24:03.063863 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="extract-utilities" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.063875 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="extract-utilities" Jan 29 14:24:03 crc kubenswrapper[4787]: E0129 14:24:03.063955 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="extract-content" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.063968 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="extract-content" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.064213 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="57ba73f7-1eff-4792-aba2-7dc5e9b60814" containerName="registry-server" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.065896 4787 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.079652 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b6m5l"] Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.205665 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m88m\" (UniqueName: \"kubernetes.io/projected/42e28989-1087-429b-9def-ee26cf42f4b7-kube-api-access-6m88m\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.205730 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-catalog-content\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.205789 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-utilities\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.306834 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-catalog-content\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.306895 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-utilities\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.306988 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m88m\" (UniqueName: \"kubernetes.io/projected/42e28989-1087-429b-9def-ee26cf42f4b7-kube-api-access-6m88m\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.307322 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-catalog-content\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.307340 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-utilities\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.326480 4787 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6m88m\" (UniqueName: \"kubernetes.io/projected/42e28989-1087-429b-9def-ee26cf42f4b7-kube-api-access-6m88m\") pod \"redhat-marketplace-b6m5l\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.402302 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:03 crc kubenswrapper[4787]: I0129 14:24:03.863073 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b6m5l"] Jan 29 14:24:04 crc kubenswrapper[4787]: I0129 14:24:04.666872 4787 generic.go:334] "Generic (PLEG): container finished" podID="42e28989-1087-429b-9def-ee26cf42f4b7" containerID="6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8" exitCode=0 Jan 29 14:24:04 crc kubenswrapper[4787]: I0129 14:24:04.667038 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b6m5l" event={"ID":"42e28989-1087-429b-9def-ee26cf42f4b7","Type":"ContainerDied","Data":"6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8"} Jan 29 14:24:04 crc kubenswrapper[4787]: I0129 14:24:04.667191 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b6m5l" event={"ID":"42e28989-1087-429b-9def-ee26cf42f4b7","Type":"ContainerStarted","Data":"cad15cc9921a1f27f8e771c4b3e93c8a75331b53bb14ba7be5d0b1a82d3b3398"} Jan 29 14:24:05 crc kubenswrapper[4787]: I0129 14:24:05.678560 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b6m5l" event={"ID":"42e28989-1087-429b-9def-ee26cf42f4b7","Type":"ContainerStarted","Data":"035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d"} Jan 29 14:24:06 crc kubenswrapper[4787]: I0129 14:24:06.687812 4787 generic.go:334] "Generic (PLEG): container finished" podID="42e28989-1087-429b-9def-ee26cf42f4b7" containerID="035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d" exitCode=0 Jan 29 14:24:06 crc kubenswrapper[4787]: I0129 14:24:06.687862 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b6m5l" event={"ID":"42e28989-1087-429b-9def-ee26cf42f4b7","Type":"ContainerDied","Data":"035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d"} Jan 29 14:24:07 crc kubenswrapper[4787]: I0129 14:24:07.707657 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b6m5l" event={"ID":"42e28989-1087-429b-9def-ee26cf42f4b7","Type":"ContainerStarted","Data":"52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503"} Jan 29 14:24:07 crc kubenswrapper[4787]: I0129 14:24:07.737716 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b6m5l" podStartSLOduration=2.203692214 podStartE2EDuration="4.737681989s" podCreationTimestamp="2026-01-29 14:24:03 +0000 UTC" firstStartedPulling="2026-01-29 14:24:04.669086981 +0000 UTC m=+4083.430347267" lastFinishedPulling="2026-01-29 14:24:07.203076766 +0000 UTC m=+4085.964337042" observedRunningTime="2026-01-29 14:24:07.730692772 +0000 UTC m=+4086.491953068" watchObservedRunningTime="2026-01-29 14:24:07.737681989 +0000 UTC m=+4086.498942305" Jan 29 14:24:13 crc kubenswrapper[4787]: I0129 14:24:13.403686 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:13 crc kubenswrapper[4787]: I0129 14:24:13.404242 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:13 crc kubenswrapper[4787]: I0129 14:24:13.465745 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:13 crc kubenswrapper[4787]: I0129 14:24:13.841972 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:13 crc kubenswrapper[4787]: I0129 14:24:13.899208 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b6m5l"] Jan 29 14:24:15 crc kubenswrapper[4787]: I0129 14:24:15.777550 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b6m5l" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" containerName="registry-server" containerID="cri-o://52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503" gracePeriod=2 Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.343751 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.517641 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-catalog-content\") pod \"42e28989-1087-429b-9def-ee26cf42f4b7\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.517781 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6m88m\" (UniqueName: \"kubernetes.io/projected/42e28989-1087-429b-9def-ee26cf42f4b7-kube-api-access-6m88m\") pod \"42e28989-1087-429b-9def-ee26cf42f4b7\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.517853 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-utilities\") pod \"42e28989-1087-429b-9def-ee26cf42f4b7\" (UID: \"42e28989-1087-429b-9def-ee26cf42f4b7\") " Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.520038 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-utilities" (OuterVolumeSpecName: "utilities") pod "42e28989-1087-429b-9def-ee26cf42f4b7" (UID: "42e28989-1087-429b-9def-ee26cf42f4b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.528006 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42e28989-1087-429b-9def-ee26cf42f4b7-kube-api-access-6m88m" (OuterVolumeSpecName: "kube-api-access-6m88m") pod "42e28989-1087-429b-9def-ee26cf42f4b7" (UID: "42e28989-1087-429b-9def-ee26cf42f4b7"). InnerVolumeSpecName "kube-api-access-6m88m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.564521 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "42e28989-1087-429b-9def-ee26cf42f4b7" (UID: "42e28989-1087-429b-9def-ee26cf42f4b7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.619767 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.619821 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6m88m\" (UniqueName: \"kubernetes.io/projected/42e28989-1087-429b-9def-ee26cf42f4b7-kube-api-access-6m88m\") on node \"crc\" DevicePath \"\"" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.619836 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42e28989-1087-429b-9def-ee26cf42f4b7-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.794352 4787 generic.go:334] "Generic (PLEG): container finished" podID="42e28989-1087-429b-9def-ee26cf42f4b7" containerID="52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503" exitCode=0 Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.794440 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b6m5l" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.794440 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b6m5l" event={"ID":"42e28989-1087-429b-9def-ee26cf42f4b7","Type":"ContainerDied","Data":"52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503"} Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.795254 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b6m5l" event={"ID":"42e28989-1087-429b-9def-ee26cf42f4b7","Type":"ContainerDied","Data":"cad15cc9921a1f27f8e771c4b3e93c8a75331b53bb14ba7be5d0b1a82d3b3398"} Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.795293 4787 scope.go:117] "RemoveContainer" containerID="52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.823807 4787 scope.go:117] "RemoveContainer" containerID="035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d" Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.854762 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b6m5l"] Jan 29 14:24:16 crc kubenswrapper[4787]: I0129 14:24:16.867366 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b6m5l"] Jan 29 14:24:17 crc kubenswrapper[4787]: I0129 14:24:17.185846 4787 scope.go:117] "RemoveContainer" containerID="6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8" Jan 29 14:24:17 crc kubenswrapper[4787]: I0129 14:24:17.235512 4787 scope.go:117] "RemoveContainer" containerID="52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503" Jan 29 14:24:17 crc kubenswrapper[4787]: E0129 14:24:17.236068 4787 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503\": container with ID starting with 52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503 not found: ID does not exist" containerID="52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503" Jan 29 14:24:17 crc kubenswrapper[4787]: I0129 14:24:17.236230 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503"} err="failed to get container status \"52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503\": rpc error: code = NotFound desc = could not find container \"52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503\": container with ID starting with 52e3a1c563b79e6af13936b82d1d333142150a9781391ff5d5fe0af9bedd4503 not found: ID does not exist" Jan 29 14:24:17 crc kubenswrapper[4787]: I0129 14:24:17.236372 4787 scope.go:117] "RemoveContainer" containerID="035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d" Jan 29 14:24:17 crc kubenswrapper[4787]: E0129 14:24:17.236910 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d\": container with ID starting with 035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d not found: ID does not exist" containerID="035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d" Jan 29 14:24:17 crc kubenswrapper[4787]: I0129 14:24:17.236951 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d"} err="failed to get container status \"035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d\": rpc error: code = NotFound desc = could not find container \"035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d\": container with ID starting with 035b7d0f6a3b265a60f02617571e423508f4714522db58c56c91cbf59d053a0d not found: ID does not exist" Jan 29 14:24:17 crc kubenswrapper[4787]: I0129 14:24:17.236981 4787 scope.go:117] "RemoveContainer" containerID="6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8" Jan 29 14:24:17 crc kubenswrapper[4787]: E0129 14:24:17.237329 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8\": container with ID starting with 6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8 not found: ID does not exist" containerID="6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8" Jan 29 14:24:17 crc kubenswrapper[4787]: I0129 14:24:17.237364 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8"} err="failed to get container status \"6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8\": rpc error: code = NotFound desc = could not find container \"6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8\": container with ID starting with 6ede54f1f1c2810df211fddb6014f6f4f40300e9bb58f3188b36af32800472f8 not found: ID does not exist" Jan 29 14:24:18 crc kubenswrapper[4787]: I0129 14:24:18.002910 4787 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" path="/var/lib/kubelet/pods/42e28989-1087-429b-9def-ee26cf42f4b7/volumes"
Jan 29 14:25:58 crc kubenswrapper[4787]: I0129 14:25:58.395041 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 14:25:58 crc kubenswrapper[4787]: I0129 14:25:58.396005 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 14:26:28 crc kubenswrapper[4787]: I0129 14:26:28.394199 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 14:26:28 crc kubenswrapper[4787]: I0129 14:26:28.394967 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 14:26:58 crc kubenswrapper[4787]: I0129 14:26:58.395195 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 14:26:58 crc kubenswrapper[4787]: I0129 14:26:58.395817 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 14:26:58 crc kubenswrapper[4787]: I0129 14:26:58.395870 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn"
Jan 29 14:26:58 crc kubenswrapper[4787]: I0129 14:26:58.396571 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 29 14:26:58 crc kubenswrapper[4787]: I0129 14:26:58.396653 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9" gracePeriod=600
Jan 29 14:26:58 crc kubenswrapper[4787]: E0129 14:26:58.520347 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:26:59 crc kubenswrapper[4787]: I0129 14:26:59.213813 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9" exitCode=0
Jan 29 14:26:59 crc kubenswrapper[4787]: I0129 14:26:59.213893 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"}
Jan 29 14:26:59 crc kubenswrapper[4787]: I0129 14:26:59.214124 4787 scope.go:117] "RemoveContainer" containerID="3ac1c87b91aecfeb0a0ff58d43369e656c79f643b3b5620facbc8db1354d7bbf"
Jan 29 14:26:59 crc kubenswrapper[4787]: I0129 14:26:59.214714 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:26:59 crc kubenswrapper[4787]: E0129 14:26:59.215028 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:27:09 crc kubenswrapper[4787]: I0129 14:27:09.985585 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:27:09 crc kubenswrapper[4787]: E0129 14:27:09.986362 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:27:23 crc kubenswrapper[4787]: I0129 14:27:23.985821 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:27:23 crc kubenswrapper[4787]: E0129 14:27:23.987075 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:27:38 crc kubenswrapper[4787]: I0129 14:27:38.985933 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:27:38 crc kubenswrapper[4787]: E0129 14:27:38.986642 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:27:49 crc kubenswrapper[4787]: I0129 14:27:49.985706 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:27:49 crc kubenswrapper[4787]: E0129 14:27:49.986135 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:28:03 crc kubenswrapper[4787]: I0129 14:28:03.985849 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:28:03 crc kubenswrapper[4787]: E0129 14:28:03.986614 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:28:15 crc kubenswrapper[4787]: I0129 14:28:15.986308 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:28:15 crc kubenswrapper[4787]: E0129 14:28:15.988640 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:28:26 crc kubenswrapper[4787]: I0129 14:28:26.986388 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:28:26 crc kubenswrapper[4787]: E0129 14:28:26.988045 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:28:39 crc kubenswrapper[4787]: I0129 14:28:39.987319 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:28:39 crc kubenswrapper[4787]: E0129 14:28:39.988552 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:28:50 crc kubenswrapper[4787]: I0129 14:28:50.985633 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:28:50 crc kubenswrapper[4787]: E0129 14:28:50.986393 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:29:01 crc kubenswrapper[4787]: I0129 14:29:01.997220 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:29:02 crc kubenswrapper[4787]: E0129 14:29:01.998601 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:29:12 crc kubenswrapper[4787]: I0129 14:29:12.985693 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:29:12 crc kubenswrapper[4787]: E0129 14:29:12.986722 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:29:25 crc kubenswrapper[4787]: I0129 14:29:25.986338 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:29:25 crc kubenswrapper[4787]: E0129 14:29:25.987206 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:29:37 crc kubenswrapper[4787]: I0129 14:29:37.986747 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:29:37 crc kubenswrapper[4787]: E0129 14:29:37.987806 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:29:48 crc kubenswrapper[4787]: I0129 14:29:48.986353 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:29:48 crc kubenswrapper[4787]: E0129 14:29:48.987355 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.187987 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"]
Jan 29 14:30:00 crc kubenswrapper[4787]: E0129 14:30:00.190340 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" containerName="registry-server"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.190380 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" containerName="registry-server"
Jan 29 14:30:00 crc kubenswrapper[4787]: E0129 14:30:00.190412 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" containerName="extract-utilities"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.190422 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" containerName="extract-utilities"
Jan 29 14:30:00 crc kubenswrapper[4787]: E0129 14:30:00.190444 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" containerName="extract-content"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.190476 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" containerName="extract-content"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.190648 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="42e28989-1087-429b-9def-ee26cf42f4b7" containerName="registry-server"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.191162 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.193172 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.194125 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.195121 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"]
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.287123 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-secret-volume\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.287185 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-config-volume\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.287263 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn2mw\" (UniqueName: \"kubernetes.io/projected/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-kube-api-access-fn2mw\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.388819 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-secret-volume\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.388881 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-config-volume\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.388943 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn2mw\" (UniqueName: \"kubernetes.io/projected/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-kube-api-access-fn2mw\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.389921 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-config-volume\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.395077 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-secret-volume\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.404343 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn2mw\" (UniqueName: \"kubernetes.io/projected/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-kube-api-access-fn2mw\") pod \"collect-profiles-29494950-6mnqn\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.516074 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:00 crc kubenswrapper[4787]: I0129 14:30:00.949328 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"]
Jan 29 14:30:01 crc kubenswrapper[4787]: I0129 14:30:01.854613 4787 generic.go:334] "Generic (PLEG): container finished" podID="ddf5ee92-fecd-4061-9e3a-bbefcb523ad0" containerID="9c14525096218d3ca8947d2631a88a8a6927a63ddae3becd7894240fddb42d3b" exitCode=0
Jan 29 14:30:01 crc kubenswrapper[4787]: I0129 14:30:01.854663 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn" event={"ID":"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0","Type":"ContainerDied","Data":"9c14525096218d3ca8947d2631a88a8a6927a63ddae3becd7894240fddb42d3b"}
Jan 29 14:30:01 crc kubenswrapper[4787]: I0129 14:30:01.854711 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn" event={"ID":"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0","Type":"ContainerStarted","Data":"0f7ba498a3de9ac7932688ea454ada7187a29385b0d69acf581338665bab9a5b"}
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.145338 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.235650 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-secret-volume\") pod \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") "
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.235701 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn2mw\" (UniqueName: \"kubernetes.io/projected/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-kube-api-access-fn2mw\") pod \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") "
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.235736 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-config-volume\") pod \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\" (UID: \"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0\") "
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.236744 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-config-volume" (OuterVolumeSpecName: "config-volume") pod "ddf5ee92-fecd-4061-9e3a-bbefcb523ad0" (UID: "ddf5ee92-fecd-4061-9e3a-bbefcb523ad0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.242646 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ddf5ee92-fecd-4061-9e3a-bbefcb523ad0" (UID: "ddf5ee92-fecd-4061-9e3a-bbefcb523ad0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.243912 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-kube-api-access-fn2mw" (OuterVolumeSpecName: "kube-api-access-fn2mw") pod "ddf5ee92-fecd-4061-9e3a-bbefcb523ad0" (UID: "ddf5ee92-fecd-4061-9e3a-bbefcb523ad0"). InnerVolumeSpecName "kube-api-access-fn2mw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.337595 4787 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.337964 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn2mw\" (UniqueName: \"kubernetes.io/projected/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-kube-api-access-fn2mw\") on node \"crc\" DevicePath \"\""
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.337976 4787 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddf5ee92-fecd-4061-9e3a-bbefcb523ad0-config-volume\") on node \"crc\" DevicePath \"\""
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.868016 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn" event={"ID":"ddf5ee92-fecd-4061-9e3a-bbefcb523ad0","Type":"ContainerDied","Data":"0f7ba498a3de9ac7932688ea454ada7187a29385b0d69acf581338665bab9a5b"}
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.868058 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f7ba498a3de9ac7932688ea454ada7187a29385b0d69acf581338665bab9a5b"
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.868065 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494950-6mnqn"
Jan 29 14:30:03 crc kubenswrapper[4787]: I0129 14:30:03.986931 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:30:03 crc kubenswrapper[4787]: E0129 14:30:03.987235 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:30:04 crc kubenswrapper[4787]: I0129 14:30:04.220384 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl"]
Jan 29 14:30:04 crc kubenswrapper[4787]: I0129 14:30:04.226999 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494905-t5btl"]
Jan 29 14:30:06 crc kubenswrapper[4787]: I0129 14:30:06.022691 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="958a9bbd-27d4-490e-9ae6-eafcae5db33a" path="/var/lib/kubelet/pods/958a9bbd-27d4-490e-9ae6-eafcae5db33a/volumes"
Jan 29 14:30:15 crc kubenswrapper[4787]: I0129 14:30:15.985874 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:30:15 crc kubenswrapper[4787]: E0129 14:30:15.987271 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:30:24 crc kubenswrapper[4787]: I0129 14:30:24.901080 4787 scope.go:117] "RemoveContainer" containerID="9f532fd3f159dced3d571fe0bd7522a5fdf04afd99159e58bddaaae262e687c6"
Jan 29 14:30:28 crc kubenswrapper[4787]: I0129 14:30:28.985970 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:30:28 crc kubenswrapper[4787]: E0129 14:30:28.986748 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:30:40 crc kubenswrapper[4787]: I0129 14:30:40.988508 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:30:40 crc kubenswrapper[4787]: E0129 14:30:40.991127 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:30:52 crc kubenswrapper[4787]: I0129 14:30:52.987177 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:30:52 crc kubenswrapper[4787]: E0129 14:30:52.987693 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:31:03 crc kubenswrapper[4787]: I0129 14:31:03.986976 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:31:03 crc kubenswrapper[4787]: E0129 14:31:03.988495 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:31:15 crc kubenswrapper[4787]: I0129 14:31:15.986173 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:31:15 crc kubenswrapper[4787]: E0129 14:31:15.987365 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:31:27 crc kubenswrapper[4787]: I0129 14:31:27.986010 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:31:27 crc kubenswrapper[4787]: E0129 14:31:27.987137 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:31:42 crc kubenswrapper[4787]: I0129 14:31:42.986002 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:31:42 crc kubenswrapper[4787]: E0129 14:31:42.986693 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:31:53 crc kubenswrapper[4787]: I0129 14:31:53.986332 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:31:53 crc kubenswrapper[4787]: E0129 14:31:53.988978 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:32:05 crc kubenswrapper[4787]: I0129 14:32:05.985986 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9"
Jan 29 14:32:06 crc kubenswrapper[4787]: I0129 14:32:06.144697 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"c906f34e52b877d25ef5aad3562cb5fbdb2445aeb3dfa496068c08baebce3796"}
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.240738 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lznlk"]
Jan 29 14:32:07 crc kubenswrapper[4787]: E0129 14:32:07.241798 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddf5ee92-fecd-4061-9e3a-bbefcb523ad0" containerName="collect-profiles"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.241816 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddf5ee92-fecd-4061-9e3a-bbefcb523ad0" containerName="collect-profiles"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.241992 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddf5ee92-fecd-4061-9e3a-bbefcb523ad0" containerName="collect-profiles"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.243235 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.262585 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lznlk"]
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.301490 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5mj4\" (UniqueName: \"kubernetes.io/projected/5fd218fb-194c-435d-8b15-42ce8ee48d52-kube-api-access-n5mj4\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.301542 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-catalog-content\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.301608 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-utilities\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.402816 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-utilities\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.403608 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5mj4\" (UniqueName: \"kubernetes.io/projected/5fd218fb-194c-435d-8b15-42ce8ee48d52-kube-api-access-n5mj4\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.404044 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-catalog-content\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.403536 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-utilities\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.404447 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-catalog-content\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.425817 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5mj4\" (UniqueName: \"kubernetes.io/projected/5fd218fb-194c-435d-8b15-42ce8ee48d52-kube-api-access-n5mj4\") pod \"certified-operators-lznlk\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") " pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:07 crc kubenswrapper[4787]: I0129 14:32:07.616349 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:08 crc kubenswrapper[4787]: I0129 14:32:07.999474 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lznlk"]
Jan 29 14:32:08 crc kubenswrapper[4787]: W0129 14:32:08.009539 4787 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fd218fb_194c_435d_8b15_42ce8ee48d52.slice/crio-dfe4a2fe9d2e7f2607648f01df9401d6f0a4fba3ccb9815b4436d3fa2bed8865 WatchSource:0}: Error finding container dfe4a2fe9d2e7f2607648f01df9401d6f0a4fba3ccb9815b4436d3fa2bed8865: Status 404 returned error can't find the container with id dfe4a2fe9d2e7f2607648f01df9401d6f0a4fba3ccb9815b4436d3fa2bed8865
Jan 29 14:32:08 crc kubenswrapper[4787]: I0129 14:32:08.157270 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lznlk" event={"ID":"5fd218fb-194c-435d-8b15-42ce8ee48d52","Type":"ContainerStarted","Data":"56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a"}
Jan 29 14:32:08 crc kubenswrapper[4787]: I0129 14:32:08.157577 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lznlk" event={"ID":"5fd218fb-194c-435d-8b15-42ce8ee48d52","Type":"ContainerStarted","Data":"dfe4a2fe9d2e7f2607648f01df9401d6f0a4fba3ccb9815b4436d3fa2bed8865"}
Jan 29 14:32:09 crc kubenswrapper[4787]: I0129 14:32:09.165036 4787 generic.go:334] "Generic (PLEG): container finished" podID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerID="56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a" exitCode=0
Jan 29 14:32:09 crc kubenswrapper[4787]: I0129 14:32:09.165172 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lznlk" event={"ID":"5fd218fb-194c-435d-8b15-42ce8ee48d52","Type":"ContainerDied","Data":"56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a"}
Jan 29 14:32:09 crc kubenswrapper[4787]: I0129 14:32:09.168225 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 14:32:10 crc kubenswrapper[4787]: I0129 14:32:10.173882 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lznlk" event={"ID":"5fd218fb-194c-435d-8b15-42ce8ee48d52","Type":"ContainerStarted","Data":"7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057"}
Jan 29 14:32:11 crc kubenswrapper[4787]: I0129 14:32:11.182867 4787 generic.go:334] "Generic (PLEG): container finished" podID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerID="7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057" exitCode=0
Jan 29 14:32:11 crc kubenswrapper[4787]: I0129 14:32:11.182962 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lznlk" event={"ID":"5fd218fb-194c-435d-8b15-42ce8ee48d52","Type":"ContainerDied","Data":"7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057"}
Jan 29 14:32:12 crc kubenswrapper[4787]: I0129 14:32:12.193391 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lznlk" event={"ID":"5fd218fb-194c-435d-8b15-42ce8ee48d52","Type":"ContainerStarted","Data":"09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a"}
Jan 29 14:32:12 crc kubenswrapper[4787]: I0129 14:32:12.222971 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lznlk" podStartSLOduration=2.799329822 podStartE2EDuration="5.222952699s" podCreationTimestamp="2026-01-29 14:32:07 +0000 UTC" firstStartedPulling="2026-01-29 14:32:09.167946253 +0000 UTC m=+4567.929206529" lastFinishedPulling="2026-01-29 14:32:11.59156912 +0000 UTC m=+4570.352829406" observedRunningTime="2026-01-29 14:32:12.220924231 +0000 UTC m=+4570.982184507" watchObservedRunningTime="2026-01-29 14:32:12.222952699 +0000 UTC m=+4570.984212985"
Jan 29 14:32:17 crc kubenswrapper[4787]: I0129 14:32:17.617284 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:17 crc kubenswrapper[4787]: I0129 14:32:17.618625 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:17 crc kubenswrapper[4787]: I0129 14:32:17.661249 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:18 crc kubenswrapper[4787]: I0129 14:32:18.303463 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:19 crc kubenswrapper[4787]: I0129 14:32:19.102221 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lznlk"]
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.253025 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lznlk" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerName="registry-server" containerID="cri-o://09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a" gracePeriod=2
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.687227 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.827487 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5mj4\" (UniqueName: \"kubernetes.io/projected/5fd218fb-194c-435d-8b15-42ce8ee48d52-kube-api-access-n5mj4\") pod \"5fd218fb-194c-435d-8b15-42ce8ee48d52\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") "
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.827627 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-utilities\") pod \"5fd218fb-194c-435d-8b15-42ce8ee48d52\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") "
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.827748 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-catalog-content\") pod \"5fd218fb-194c-435d-8b15-42ce8ee48d52\" (UID: \"5fd218fb-194c-435d-8b15-42ce8ee48d52\") "
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.829378 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-utilities" (OuterVolumeSpecName: "utilities") pod "5fd218fb-194c-435d-8b15-42ce8ee48d52" (UID: "5fd218fb-194c-435d-8b15-42ce8ee48d52"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.839096 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fd218fb-194c-435d-8b15-42ce8ee48d52-kube-api-access-n5mj4" (OuterVolumeSpecName: "kube-api-access-n5mj4") pod "5fd218fb-194c-435d-8b15-42ce8ee48d52" (UID: "5fd218fb-194c-435d-8b15-42ce8ee48d52"). InnerVolumeSpecName "kube-api-access-n5mj4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.929968 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 14:32:20 crc kubenswrapper[4787]: I0129 14:32:20.930011 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5mj4\" (UniqueName: \"kubernetes.io/projected/5fd218fb-194c-435d-8b15-42ce8ee48d52-kube-api-access-n5mj4\") on node \"crc\" DevicePath \"\""
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.263576 4787 generic.go:334] "Generic (PLEG): container finished" podID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerID="09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a" exitCode=0
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.263612 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lznlk" event={"ID":"5fd218fb-194c-435d-8b15-42ce8ee48d52","Type":"ContainerDied","Data":"09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a"}
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.263629 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lznlk"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.263639 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lznlk" event={"ID":"5fd218fb-194c-435d-8b15-42ce8ee48d52","Type":"ContainerDied","Data":"dfe4a2fe9d2e7f2607648f01df9401d6f0a4fba3ccb9815b4436d3fa2bed8865"}
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.263657 4787 scope.go:117] "RemoveContainer" containerID="09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.285417 4787 scope.go:117] "RemoveContainer" containerID="7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.310604 4787 scope.go:117] "RemoveContainer" containerID="56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.332674 4787 scope.go:117] "RemoveContainer" containerID="09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a"
Jan 29 14:32:21 crc kubenswrapper[4787]: E0129 14:32:21.333271 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a\": container with ID starting with 09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a not found: ID does not exist" containerID="09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.333305 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a"} err="failed to get container status \"09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a\": rpc error: code = NotFound desc = could not find container \"09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a\": container with ID starting with 09113f563b82e80343e8f9db56b0c885b85133baf13b2dc079105508fef1205a not found: ID does not exist"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.333327 4787 scope.go:117] "RemoveContainer" containerID="7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057"
Jan 29 14:32:21 crc kubenswrapper[4787]: E0129 14:32:21.333750 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057\": container with ID starting with 7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057 not found: ID does not exist" containerID="7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.333774 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057"} err="failed to get container status \"7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057\": rpc error: code = NotFound desc = could not find container \"7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057\": container with ID starting with 7fc8b09bd445f5c4e223d7703d31cf6e19e9d921565f1baf820299ee17180057 not found: ID does not exist"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.333794 4787 scope.go:117] "RemoveContainer" containerID="56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a"
Jan 29 14:32:21 crc kubenswrapper[4787]: E0129 14:32:21.334094 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a\": container with ID starting with 56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a not found: ID does not exist" containerID="56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.334114 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a"} err="failed to get container status \"56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a\": rpc error: code = NotFound desc = could not find container \"56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a\": container with ID starting with 56d5caffe1cd2069fe96dddcde4dd0998c5b1e95f339e0d4854bb8d64124343a not found: ID does not exist"
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.399891 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5fd218fb-194c-435d-8b15-42ce8ee48d52" (UID: "5fd218fb-194c-435d-8b15-42ce8ee48d52"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.436662 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5fd218fb-194c-435d-8b15-42ce8ee48d52-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.598529 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lznlk"]
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.607970 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lznlk"]
Jan 29 14:32:21 crc kubenswrapper[4787]: I0129 14:32:21.995317 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" path="/var/lib/kubelet/pods/5fd218fb-194c-435d-8b15-42ce8ee48d52/volumes"
Jan 29 14:34:06 crc kubenswrapper[4787]: I0129 14:34:06.819389 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zzqhh"]
Jan 29 14:34:06 crc kubenswrapper[4787]: E0129 14:34:06.821375 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerName="extract-content"
Jan 29 14:34:06 crc kubenswrapper[4787]: I0129 14:34:06.821399 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerName="extract-content"
Jan 29 14:34:06 crc kubenswrapper[4787]: E0129 14:34:06.821412 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerName="registry-server"
Jan 29 14:34:06 crc kubenswrapper[4787]: I0129 14:34:06.821420 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerName="registry-server"
Jan 29 14:34:06 crc kubenswrapper[4787]: E0129 14:34:06.821484 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerName="extract-utilities"
Jan 29 14:34:06 crc kubenswrapper[4787]: I0129 14:34:06.821493 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerName="extract-utilities"
Jan 29 14:34:06 crc kubenswrapper[4787]: I0129 14:34:06.821687 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fd218fb-194c-435d-8b15-42ce8ee48d52" containerName="registry-server"
Jan 29 14:34:06 crc kubenswrapper[4787]: I0129 14:34:06.823512 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:06 crc kubenswrapper[4787]: I0129 14:34:06.843294 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zzqhh"]
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.020930 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9hbr\" (UniqueName: \"kubernetes.io/projected/0d5704f9-44ec-43e3-931c-a83b15cb396a-kube-api-access-m9hbr\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.021143 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-catalog-content\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.021294 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-utilities\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.123443 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9hbr\" (UniqueName: \"kubernetes.io/projected/0d5704f9-44ec-43e3-931c-a83b15cb396a-kube-api-access-m9hbr\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.123549 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-catalog-content\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.123582 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-utilities\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.124217 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-utilities\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.124583 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-catalog-content\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.151926 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9hbr\" (UniqueName: \"kubernetes.io/projected/0d5704f9-44ec-43e3-931c-a83b15cb396a-kube-api-access-m9hbr\") pod \"redhat-operators-zzqhh\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") " pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.171218 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:07 crc kubenswrapper[4787]: I0129 14:34:07.628974 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zzqhh"]
Jan 29 14:34:08 crc kubenswrapper[4787]: I0129 14:34:08.154449 4787 generic.go:334] "Generic (PLEG): container finished" podID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerID="5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535" exitCode=0
Jan 29 14:34:08 crc kubenswrapper[4787]: I0129 14:34:08.154789 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzqhh" event={"ID":"0d5704f9-44ec-43e3-931c-a83b15cb396a","Type":"ContainerDied","Data":"5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535"}
Jan 29 14:34:08 crc kubenswrapper[4787]: I0129 14:34:08.154814 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzqhh" event={"ID":"0d5704f9-44ec-43e3-931c-a83b15cb396a","Type":"ContainerStarted","Data":"5ab67f75edcbfa06e0e9761cb143928e86f73cdc8d961bb46d98d3d0c9e2922e"}
Jan 29 14:34:09 crc kubenswrapper[4787]: I0129 14:34:09.168404 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzqhh" event={"ID":"0d5704f9-44ec-43e3-931c-a83b15cb396a","Type":"ContainerStarted","Data":"65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903"}
Jan 29 14:34:10 crc kubenswrapper[4787]: I0129 14:34:10.181300 4787 generic.go:334] "Generic (PLEG): container finished" podID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerID="65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903" exitCode=0
Jan 29 14:34:10 crc kubenswrapper[4787]: I0129 14:34:10.181379 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzqhh" event={"ID":"0d5704f9-44ec-43e3-931c-a83b15cb396a","Type":"ContainerDied","Data":"65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903"}
Jan 29 14:34:12 crc kubenswrapper[4787]: I0129 14:34:12.229390 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzqhh" event={"ID":"0d5704f9-44ec-43e3-931c-a83b15cb396a","Type":"ContainerStarted","Data":"0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3"}
Jan 29 14:34:12 crc kubenswrapper[4787]: I0129 14:34:12.263896 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zzqhh" podStartSLOduration=3.31460795 podStartE2EDuration="6.26387569s" podCreationTimestamp="2026-01-29 14:34:06 +0000 UTC" firstStartedPulling="2026-01-29 14:34:08.155928734 +0000 UTC m=+4686.917189010" lastFinishedPulling="2026-01-29 14:34:11.105196474 +0000 UTC m=+4689.866456750" observedRunningTime="2026-01-29 14:34:12.256246904 +0000 UTC m=+4691.017507260" watchObservedRunningTime="2026-01-29 14:34:12.26387569 +0000 UTC m=+4691.025135976"
Jan 29 14:34:17 crc kubenswrapper[4787]: I0129 14:34:17.172386 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:17 crc kubenswrapper[4787]: I0129 14:34:17.172887 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:18 crc kubenswrapper[4787]: I0129 14:34:18.211474 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zzqhh" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="registry-server" probeResult="failure" output=<
Jan 29 14:34:18 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s
Jan 29 14:34:18 crc kubenswrapper[4787]: >
Jan 29 14:34:27 crc kubenswrapper[4787]: I0129 14:34:27.211013 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:27 crc kubenswrapper[4787]: I0129 14:34:27.266707 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:27 crc kubenswrapper[4787]: I0129 14:34:27.451311 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zzqhh"]
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.355447 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zzqhh" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="registry-server" containerID="cri-o://0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3" gracePeriod=2
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.394925 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.394994 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.765074 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.947054 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-catalog-content\") pod \"0d5704f9-44ec-43e3-931c-a83b15cb396a\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") "
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.947161 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9hbr\" (UniqueName: \"kubernetes.io/projected/0d5704f9-44ec-43e3-931c-a83b15cb396a-kube-api-access-m9hbr\") pod \"0d5704f9-44ec-43e3-931c-a83b15cb396a\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") "
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.947215 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-utilities\") pod \"0d5704f9-44ec-43e3-931c-a83b15cb396a\" (UID: \"0d5704f9-44ec-43e3-931c-a83b15cb396a\") "
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.948548 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-utilities" (OuterVolumeSpecName: "utilities") pod "0d5704f9-44ec-43e3-931c-a83b15cb396a" (UID: "0d5704f9-44ec-43e3-931c-a83b15cb396a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:34:28 crc kubenswrapper[4787]: I0129 14:34:28.960801 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d5704f9-44ec-43e3-931c-a83b15cb396a-kube-api-access-m9hbr" (OuterVolumeSpecName: "kube-api-access-m9hbr") pod "0d5704f9-44ec-43e3-931c-a83b15cb396a" (UID: "0d5704f9-44ec-43e3-931c-a83b15cb396a"). InnerVolumeSpecName "kube-api-access-m9hbr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.049788 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9hbr\" (UniqueName: \"kubernetes.io/projected/0d5704f9-44ec-43e3-931c-a83b15cb396a-kube-api-access-m9hbr\") on node \"crc\" DevicePath \"\""
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.050449 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.117672 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0d5704f9-44ec-43e3-931c-a83b15cb396a" (UID: "0d5704f9-44ec-43e3-931c-a83b15cb396a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.152700 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d5704f9-44ec-43e3-931c-a83b15cb396a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.363867 4787 generic.go:334] "Generic (PLEG): container finished" podID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerID="0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3" exitCode=0
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.363915 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzqhh" event={"ID":"0d5704f9-44ec-43e3-931c-a83b15cb396a","Type":"ContainerDied","Data":"0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3"}
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.363935 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zzqhh"
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.363948 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zzqhh" event={"ID":"0d5704f9-44ec-43e3-931c-a83b15cb396a","Type":"ContainerDied","Data":"5ab67f75edcbfa06e0e9761cb143928e86f73cdc8d961bb46d98d3d0c9e2922e"}
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.363968 4787 scope.go:117] "RemoveContainer" containerID="0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3"
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.394905 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zzqhh"]
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.395057 4787 scope.go:117] "RemoveContainer" containerID="65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903"
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.406783 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zzqhh"]
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.418155 4787 scope.go:117] "RemoveContainer" containerID="5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535"
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.441722 4787 scope.go:117] "RemoveContainer" containerID="0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3"
Jan 29 14:34:29 crc kubenswrapper[4787]: E0129 14:34:29.442217 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3\": container with ID starting with 0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3 not found: ID does not exist" containerID="0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3"
Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.442253 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3"} err="failed to get container status \"0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3\": rpc error: code = NotFound desc = could not find container \"0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3\": container with ID starting with 0c42eb5179b08535091e0e6f9e5dfe04468ae9de5fb6ffda494bf2b7f24d13a3 not found: ID does not exist"
Jan 29 14:34:29 crc
kubenswrapper[4787]: I0129 14:34:29.442279 4787 scope.go:117] "RemoveContainer" containerID="65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903" Jan 29 14:34:29 crc kubenswrapper[4787]: E0129 14:34:29.442646 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903\": container with ID starting with 65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903 not found: ID does not exist" containerID="65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903" Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.442674 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903"} err="failed to get container status \"65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903\": rpc error: code = NotFound desc = could not find container \"65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903\": container with ID starting with 65f7227fc68add5300dec880f547ae43b5c8f6f30fcfd9fba2dd0074e06ed903 not found: ID does not exist" Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.442691 4787 scope.go:117] "RemoveContainer" containerID="5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535" Jan 29 14:34:29 crc kubenswrapper[4787]: E0129 14:34:29.443163 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535\": container with ID starting with 5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535 not found: ID does not exist" containerID="5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535" Jan 29 14:34:29 crc kubenswrapper[4787]: I0129 14:34:29.443187 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535"} err="failed to get container status \"5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535\": rpc error: code = NotFound desc = could not find container \"5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535\": container with ID starting with 5804727e407243d3fbe08bfc5971c1e56fcdbaa65217648173765467e6dd1535 not found: ID does not exist" Jan 29 14:34:30 crc kubenswrapper[4787]: I0129 14:34:30.001522 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" path="/var/lib/kubelet/pods/0d5704f9-44ec-43e3-931c-a83b15cb396a/volumes" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.540673 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-szrpr"] Jan 29 14:34:36 crc kubenswrapper[4787]: E0129 14:34:36.541825 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="extract-utilities" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.541848 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="extract-utilities" Jan 29 14:34:36 crc kubenswrapper[4787]: E0129 14:34:36.541881 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="extract-content" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.541894 4787 
state_mem.go:107] "Deleted CPUSet assignment" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="extract-content" Jan 29 14:34:36 crc kubenswrapper[4787]: E0129 14:34:36.541916 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="registry-server" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.541951 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="registry-server" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.542224 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d5704f9-44ec-43e3-931c-a83b15cb396a" containerName="registry-server" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.545296 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.547504 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-szrpr"] Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.684807 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-catalog-content\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.684877 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-utilities\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.684909 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l54b9\" (UniqueName: \"kubernetes.io/projected/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-kube-api-access-l54b9\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.790022 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l54b9\" (UniqueName: \"kubernetes.io/projected/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-kube-api-access-l54b9\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.790212 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-catalog-content\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.790283 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-utilities\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc 
kubenswrapper[4787]: I0129 14:34:36.790910 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-catalog-content\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.790996 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-utilities\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.815555 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l54b9\" (UniqueName: \"kubernetes.io/projected/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-kube-api-access-l54b9\") pod \"redhat-marketplace-szrpr\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:36 crc kubenswrapper[4787]: I0129 14:34:36.886903 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:37 crc kubenswrapper[4787]: I0129 14:34:37.505239 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-szrpr"] Jan 29 14:34:38 crc kubenswrapper[4787]: I0129 14:34:38.461430 4787 generic.go:334] "Generic (PLEG): container finished" podID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerID="65a52c63bd64e75c9249da90bbf61a330a96d573f2c130eb1f59e5d27b5d4518" exitCode=0 Jan 29 14:34:38 crc kubenswrapper[4787]: I0129 14:34:38.461513 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szrpr" event={"ID":"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb","Type":"ContainerDied","Data":"65a52c63bd64e75c9249da90bbf61a330a96d573f2c130eb1f59e5d27b5d4518"} Jan 29 14:34:38 crc kubenswrapper[4787]: I0129 14:34:38.461581 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szrpr" event={"ID":"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb","Type":"ContainerStarted","Data":"8ec461ff0b00d33d1fb94e458d1d6b2b983433018cff453bb84718d29221a1d5"} Jan 29 14:34:41 crc kubenswrapper[4787]: E0129 14:34:41.872804 4787 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a177636_6d03_440e_9dc3_0c1ec4cf4ffb.slice/crio-conmon-66d11130f5eeaff598cefde7718c07b77e61b2c6fe6ee93c178be5da22a71f6e.scope\": RecentStats: unable to find data in memory cache]" Jan 29 14:34:42 crc kubenswrapper[4787]: I0129 14:34:42.501719 4787 generic.go:334] "Generic (PLEG): container finished" podID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerID="66d11130f5eeaff598cefde7718c07b77e61b2c6fe6ee93c178be5da22a71f6e" exitCode=0 Jan 29 14:34:42 crc kubenswrapper[4787]: I0129 14:34:42.501783 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szrpr" event={"ID":"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb","Type":"ContainerDied","Data":"66d11130f5eeaff598cefde7718c07b77e61b2c6fe6ee93c178be5da22a71f6e"} Jan 29 14:34:46 crc kubenswrapper[4787]: I0129 14:34:46.557019 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-szrpr" event={"ID":"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb","Type":"ContainerStarted","Data":"1c797d83d177cbdc3ccf48f2a9d6718b17606ade77fe1ede6cef5986d346ab53"} Jan 29 14:34:46 crc kubenswrapper[4787]: I0129 14:34:46.581617 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-szrpr" podStartSLOduration=4.042620951 podStartE2EDuration="10.581598063s" podCreationTimestamp="2026-01-29 14:34:36 +0000 UTC" firstStartedPulling="2026-01-29 14:34:38.463749951 +0000 UTC m=+4717.225010237" lastFinishedPulling="2026-01-29 14:34:45.002727063 +0000 UTC m=+4723.763987349" observedRunningTime="2026-01-29 14:34:46.579774322 +0000 UTC m=+4725.341034598" watchObservedRunningTime="2026-01-29 14:34:46.581598063 +0000 UTC m=+4725.342858349" Jan 29 14:34:46 crc kubenswrapper[4787]: I0129 14:34:46.887487 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:46 crc kubenswrapper[4787]: I0129 14:34:46.887798 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:47 crc kubenswrapper[4787]: I0129 14:34:47.983630 4787 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-szrpr" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="registry-server" probeResult="failure" output=< Jan 29 14:34:47 crc kubenswrapper[4787]: timeout: failed to connect service ":50051" within 1s Jan 29 14:34:47 crc kubenswrapper[4787]: > Jan 29 14:34:56 crc kubenswrapper[4787]: I0129 14:34:56.956552 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:57 crc kubenswrapper[4787]: I0129 14:34:57.042429 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:57 crc kubenswrapper[4787]: I0129 14:34:57.201949 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-szrpr"] Jan 29 14:34:58 crc kubenswrapper[4787]: I0129 14:34:58.395095 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:34:58 crc kubenswrapper[4787]: I0129 14:34:58.395203 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:34:58 crc kubenswrapper[4787]: I0129 14:34:58.689239 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-szrpr" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="registry-server" containerID="cri-o://1c797d83d177cbdc3ccf48f2a9d6718b17606ade77fe1ede6cef5986d346ab53" gracePeriod=2 Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.699867 4787 generic.go:334] "Generic (PLEG): container finished" podID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerID="1c797d83d177cbdc3ccf48f2a9d6718b17606ade77fe1ede6cef5986d346ab53" 
exitCode=0 Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.699917 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szrpr" event={"ID":"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb","Type":"ContainerDied","Data":"1c797d83d177cbdc3ccf48f2a9d6718b17606ade77fe1ede6cef5986d346ab53"} Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.700260 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szrpr" event={"ID":"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb","Type":"ContainerDied","Data":"8ec461ff0b00d33d1fb94e458d1d6b2b983433018cff453bb84718d29221a1d5"} Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.700275 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ec461ff0b00d33d1fb94e458d1d6b2b983433018cff453bb84718d29221a1d5" Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.727402 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.825213 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-catalog-content\") pod \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.825334 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l54b9\" (UniqueName: \"kubernetes.io/projected/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-kube-api-access-l54b9\") pod \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.825408 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-utilities\") pod \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\" (UID: \"2a177636-6d03-440e-9dc3-0c1ec4cf4ffb\") " Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.826577 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-utilities" (OuterVolumeSpecName: "utilities") pod "2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" (UID: "2a177636-6d03-440e-9dc3-0c1ec4cf4ffb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.833043 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-kube-api-access-l54b9" (OuterVolumeSpecName: "kube-api-access-l54b9") pod "2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" (UID: "2a177636-6d03-440e-9dc3-0c1ec4cf4ffb"). InnerVolumeSpecName "kube-api-access-l54b9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.855046 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" (UID: "2a177636-6d03-440e-9dc3-0c1ec4cf4ffb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.927954 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.928007 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l54b9\" (UniqueName: \"kubernetes.io/projected/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-kube-api-access-l54b9\") on node \"crc\" DevicePath \"\"" Jan 29 14:34:59 crc kubenswrapper[4787]: I0129 14:34:59.928028 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb-utilities\") on node \"crc\" DevicePath \"\"" Jan 29 14:35:00 crc kubenswrapper[4787]: I0129 14:35:00.711329 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szrpr" Jan 29 14:35:00 crc kubenswrapper[4787]: I0129 14:35:00.749629 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-szrpr"] Jan 29 14:35:00 crc kubenswrapper[4787]: I0129 14:35:00.759771 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-szrpr"] Jan 29 14:35:02 crc kubenswrapper[4787]: I0129 14:35:02.001429 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" path="/var/lib/kubelet/pods/2a177636-6d03-440e-9dc3-0c1ec4cf4ffb/volumes" Jan 29 14:35:28 crc kubenswrapper[4787]: I0129 14:35:28.394766 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:35:28 crc kubenswrapper[4787]: I0129 14:35:28.395530 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:35:28 crc kubenswrapper[4787]: I0129 14:35:28.395599 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 14:35:28 crc kubenswrapper[4787]: I0129 14:35:28.956171 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c906f34e52b877d25ef5aad3562cb5fbdb2445aeb3dfa496068c08baebce3796"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 29 14:35:28 crc kubenswrapper[4787]: I0129 14:35:28.956729 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://c906f34e52b877d25ef5aad3562cb5fbdb2445aeb3dfa496068c08baebce3796" gracePeriod=600 Jan 29 14:35:29 crc kubenswrapper[4787]: I0129 14:35:29.971122 4787 generic.go:334] "Generic (PLEG): container 
finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="c906f34e52b877d25ef5aad3562cb5fbdb2445aeb3dfa496068c08baebce3796" exitCode=0 Jan 29 14:35:29 crc kubenswrapper[4787]: I0129 14:35:29.971179 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"c906f34e52b877d25ef5aad3562cb5fbdb2445aeb3dfa496068c08baebce3796"} Jan 29 14:35:29 crc kubenswrapper[4787]: I0129 14:35:29.971522 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"} Jan 29 14:35:29 crc kubenswrapper[4787]: I0129 14:35:29.971557 4787 scope.go:117] "RemoveContainer" containerID="d5c9264e5e36637e973b6d6581741bdb533a0f1153b0fea2d4af03d9460f05a9" Jan 29 14:37:58 crc kubenswrapper[4787]: I0129 14:37:58.394698 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:37:58 crc kubenswrapper[4787]: I0129 14:37:58.395521 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.394311 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.394945 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.616215 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-4qk5d/must-gather-d6rvj"] Jan 29 14:38:28 crc kubenswrapper[4787]: E0129 14:38:28.616528 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="extract-utilities" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.616541 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="extract-utilities" Jan 29 14:38:28 crc kubenswrapper[4787]: E0129 14:38:28.616558 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="extract-content" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.616564 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="extract-content" Jan 29 14:38:28 crc kubenswrapper[4787]: E0129 14:38:28.616587 4787 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="registry-server" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.616593 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="registry-server" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.616727 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a177636-6d03-440e-9dc3-0c1ec4cf4ffb" containerName="registry-server" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.617554 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4qk5d/must-gather-d6rvj" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.620913 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-4qk5d"/"openshift-service-ca.crt" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.620983 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-4qk5d"/"kube-root-ca.crt" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.637491 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-4qk5d/must-gather-d6rvj"] Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.817056 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjjhv\" (UniqueName: \"kubernetes.io/projected/276c414e-9383-4013-be98-925d9d641929-kube-api-access-kjjhv\") pod \"must-gather-d6rvj\" (UID: \"276c414e-9383-4013-be98-925d9d641929\") " pod="openshift-must-gather-4qk5d/must-gather-d6rvj" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.817159 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/276c414e-9383-4013-be98-925d9d641929-must-gather-output\") pod \"must-gather-d6rvj\" (UID: \"276c414e-9383-4013-be98-925d9d641929\") " pod="openshift-must-gather-4qk5d/must-gather-d6rvj" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.918973 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjjhv\" (UniqueName: \"kubernetes.io/projected/276c414e-9383-4013-be98-925d9d641929-kube-api-access-kjjhv\") pod \"must-gather-d6rvj\" (UID: \"276c414e-9383-4013-be98-925d9d641929\") " pod="openshift-must-gather-4qk5d/must-gather-d6rvj" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.919085 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/276c414e-9383-4013-be98-925d9d641929-must-gather-output\") pod \"must-gather-d6rvj\" (UID: \"276c414e-9383-4013-be98-925d9d641929\") " pod="openshift-must-gather-4qk5d/must-gather-d6rvj" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.919755 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/276c414e-9383-4013-be98-925d9d641929-must-gather-output\") pod \"must-gather-d6rvj\" (UID: \"276c414e-9383-4013-be98-925d9d641929\") " pod="openshift-must-gather-4qk5d/must-gather-d6rvj" Jan 29 14:38:28 crc kubenswrapper[4787]: I0129 14:38:28.955556 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjjhv\" (UniqueName: \"kubernetes.io/projected/276c414e-9383-4013-be98-925d9d641929-kube-api-access-kjjhv\") pod \"must-gather-d6rvj\" (UID: 
\"276c414e-9383-4013-be98-925d9d641929\") " pod="openshift-must-gather-4qk5d/must-gather-d6rvj" Jan 29 14:38:29 crc kubenswrapper[4787]: I0129 14:38:29.244012 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4qk5d/must-gather-d6rvj" Jan 29 14:38:29 crc kubenswrapper[4787]: I0129 14:38:29.741111 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-4qk5d/must-gather-d6rvj"] Jan 29 14:38:29 crc kubenswrapper[4787]: I0129 14:38:29.761693 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 29 14:38:30 crc kubenswrapper[4787]: I0129 14:38:30.503083 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4qk5d/must-gather-d6rvj" event={"ID":"276c414e-9383-4013-be98-925d9d641929","Type":"ContainerStarted","Data":"17b348f247fd4117d6bf81d8eec3a8d8bad6a6c4c7c8c5ac3352b87f8ba227da"} Jan 29 14:38:36 crc kubenswrapper[4787]: I0129 14:38:36.548854 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4qk5d/must-gather-d6rvj" event={"ID":"276c414e-9383-4013-be98-925d9d641929","Type":"ContainerStarted","Data":"0925e42b94012a650077305967aebbcb5f643c0f4591afe5d2c7d63229de8950"} Jan 29 14:38:36 crc kubenswrapper[4787]: I0129 14:38:36.549446 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4qk5d/must-gather-d6rvj" event={"ID":"276c414e-9383-4013-be98-925d9d641929","Type":"ContainerStarted","Data":"9e2ad90150cccb78023c4b7fc1df6226bbd5c29f3ba07a145a6cbd0775e1d756"} Jan 29 14:38:36 crc kubenswrapper[4787]: I0129 14:38:36.562030 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-4qk5d/must-gather-d6rvj" podStartSLOduration=2.37913708 podStartE2EDuration="8.562013677s" podCreationTimestamp="2026-01-29 14:38:28 +0000 UTC" firstStartedPulling="2026-01-29 14:38:29.761631198 +0000 UTC m=+4948.522891464" lastFinishedPulling="2026-01-29 14:38:35.944507735 +0000 UTC m=+4954.705768061" observedRunningTime="2026-01-29 14:38:36.561966085 +0000 UTC m=+4955.323226361" watchObservedRunningTime="2026-01-29 14:38:36.562013677 +0000 UTC m=+4955.323273953" Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.394241 4787 patch_prober.go:28] interesting pod/machine-config-daemon-q79sn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.394914 4787 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.394975 4787 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.395833 4787 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"} pod="openshift-machine-config-operator/machine-config-daemon-q79sn" containerMessage="Container 
machine-config-daemon failed liveness probe, will be restarted" Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.395929 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerName="machine-config-daemon" containerID="cri-o://9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" gracePeriod=600 Jan 29 14:38:58 crc kubenswrapper[4787]: E0129 14:38:58.533670 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.708234 4787 generic.go:334] "Generic (PLEG): container finished" podID="6311862b-6ca2-4dba-85e0-6829dd45c2db" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" exitCode=0 Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.708289 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerDied","Data":"9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"} Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.708345 4787 scope.go:117] "RemoveContainer" containerID="c906f34e52b877d25ef5aad3562cb5fbdb2445aeb3dfa496068c08baebce3796" Jan 29 14:38:58 crc kubenswrapper[4787]: I0129 14:38:58.709094 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" Jan 29 14:38:58 crc kubenswrapper[4787]: E0129 14:38:58.709579 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:39:09 crc kubenswrapper[4787]: I0129 14:39:09.985973 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" Jan 29 14:39:09 crc kubenswrapper[4787]: E0129 14:39:09.986796 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:39:20 crc kubenswrapper[4787]: I0129 14:39:20.985268 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" Jan 29 14:39:20 crc kubenswrapper[4787]: E0129 14:39:20.986058 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:39:34 crc kubenswrapper[4787]: I0129 14:39:34.986026 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" Jan 29 14:39:34 crc kubenswrapper[4787]: E0129 14:39:34.986641 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:39:36 crc kubenswrapper[4787]: I0129 14:39:36.950502 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg_762c1b23-a057-48d8-a61b-bef88d9f588a/util/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.107566 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg_762c1b23-a057-48d8-a61b-bef88d9f588a/util/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.128075 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg_762c1b23-a057-48d8-a61b-bef88d9f588a/pull/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.147838 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg_762c1b23-a057-48d8-a61b-bef88d9f588a/pull/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.315145 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg_762c1b23-a057-48d8-a61b-bef88d9f588a/extract/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.362629 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg_762c1b23-a057-48d8-a61b-bef88d9f588a/util/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.371473 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b6f0ceecaefb29a8e5801b760101e31cf6295f8d10236ea1e93fc043d1b4wpg_762c1b23-a057-48d8-a61b-bef88d9f588a/pull/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.547815 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b6c4d8c5f-mbqml_fc5f8eee-854d-4c9f-9306-9c8976fdca42/manager/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.620483 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8d874c8fc-6gl4h_762f62bb-d090-474d-b9f1-36ec8943103f/manager/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.744036 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6d9697b7f4-mgb4d_1c223b82-ca0b-4d31-b6ca-df34fd0684e4/manager/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.852827 4787 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8886f4c47-77c5v_4470a145-09b2-435b-ba61-8b96b442c503/manager/0.log" Jan 29 14:39:37 crc kubenswrapper[4787]: I0129 14:39:37.969768 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-69d6db494d-p7szc_a05f08af-57ab-4b3b-b15a-05d66257ed6e/manager/0.log" Jan 29 14:39:38 crc kubenswrapper[4787]: I0129 14:39:38.080471 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5fb775575f-h4m5x_95712c95-56cf-4b2a-9590-fd82b55811c9/manager/0.log" Jan 29 14:39:38 crc kubenswrapper[4787]: I0129 14:39:38.335636 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5f4b8bd54d-jsfxt_89290a20-6551-4b90-940a-d3ac4c676efc/manager/0.log" Jan 29 14:39:38 crc kubenswrapper[4787]: I0129 14:39:38.402752 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79955696d6-cktt7_c1d33841-e546-4927-a97a-ee8e6eee6765/manager/0.log" Jan 29 14:39:38 crc kubenswrapper[4787]: I0129 14:39:38.482004 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7dd968899f-wfpg9_fd03e55b-eda1-478b-b41c-c97113cd3045/manager/0.log" Jan 29 14:39:38 crc kubenswrapper[4787]: I0129 14:39:38.544840 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-84f48565d4-gjzmj_9f1a9e7c-3ca7-4fd3-ab71-838e77a80368/manager/0.log" Jan 29 14:39:38 crc kubenswrapper[4787]: I0129 14:39:38.691630 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-67bf948998-fgs9r_bccdaac3-abf0-40b7-8421-499efcb20f1e/manager/0.log" Jan 29 14:39:38 crc kubenswrapper[4787]: I0129 14:39:38.778835 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-585dbc889-dqzfb_4eab04f9-d92c-40f2-bc47-970ecd86b6e4/manager/0.log" Jan 29 14:39:38 crc kubenswrapper[4787]: I0129 14:39:38.980735 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-55bff696bd-pjlwc_bb138494-6a07-478c-b107-c1fd788bf4d7/manager/0.log" Jan 29 14:39:39 crc kubenswrapper[4787]: I0129 14:39:39.027019 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6687f8d877-tdkqd_a5c3aabb-1f99-416b-9765-28cb31fc1b39/manager/0.log" Jan 29 14:39:39 crc kubenswrapper[4787]: I0129 14:39:39.157001 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-86dfb79cc7vw2p6_6327fc99-6096-4780-8d8d-11d454f09e83/manager/0.log" Jan 29 14:39:39 crc kubenswrapper[4787]: I0129 14:39:39.316954 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-757f46c65d-22scc_a07f8190-e680-4b1b-b445-6a8a2f98c85c/operator/0.log" Jan 29 14:39:39 crc kubenswrapper[4787]: I0129 14:39:39.609243 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-m6lp4_cd1f5137-b9a8-4b27-baa6-f7117852d1fc/registry-server/0.log" Jan 29 14:39:39 crc kubenswrapper[4787]: I0129 14:39:39.770042 4787 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-788c46999f-dlk2z_1aac5893-712c-4305-be5c-058309de4369/manager/0.log" Jan 29 14:39:39 crc kubenswrapper[4787]: I0129 14:39:39.836833 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b964cf4cd-jn5rl_50979af0-52a2-45bd-b6af-22e22daeacee/manager/0.log" Jan 29 14:39:40 crc kubenswrapper[4787]: I0129 14:39:40.018760 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6b6f655c79-smwj6_c77e73c8-e6c7-4ae4-be36-4ef845996f9c/manager/0.log" Jan 29 14:39:40 crc kubenswrapper[4787]: I0129 14:39:40.031230 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-wk5tx_9046af2e-08a5-402f-97ee-5946a966b8f7/operator/0.log" Jan 29 14:39:40 crc kubenswrapper[4787]: I0129 14:39:40.150405 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-68fc8c869-7p9r2_13817508-b3e5-4f29-94d9-84fc3192d6e6/manager/0.log" Jan 29 14:39:40 crc kubenswrapper[4787]: I0129 14:39:40.315182 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-64b5b76f97-dsk62_79858873-af90-4279-82dd-ff3a996bcb30/manager/0.log" Jan 29 14:39:40 crc kubenswrapper[4787]: I0129 14:39:40.369498 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-56f8bfcd9f-glb7m_57cbf5c5-c741-4f43-881e-bf2dbecace54/manager/0.log" Jan 29 14:39:40 crc kubenswrapper[4787]: I0129 14:39:40.447080 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-564965969-57x96_3203adf3-229d-4717-95c2-d0dd83d6909b/manager/0.log" Jan 29 14:39:45 crc kubenswrapper[4787]: I0129 14:39:45.985871 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" Jan 29 14:39:45 crc kubenswrapper[4787]: E0129 14:39:45.986133 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:39:58 crc kubenswrapper[4787]: I0129 14:39:58.745575 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-jdzgw_2f88bb10-54c4-41f6-9345-74d441059753/control-plane-machine-set-operator/0.log" Jan 29 14:39:58 crc kubenswrapper[4787]: I0129 14:39:58.895262 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vlzsj_8ed21e2d-8627-418a-97d2-5576950e3494/kube-rbac-proxy/0.log" Jan 29 14:39:58 crc kubenswrapper[4787]: I0129 14:39:58.913202 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vlzsj_8ed21e2d-8627-418a-97d2-5576950e3494/machine-api-operator/0.log" Jan 29 14:39:58 crc kubenswrapper[4787]: I0129 14:39:58.986311 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" Jan 29 14:39:58 crc 
kubenswrapper[4787]: E0129 14:39:58.986500 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:40:09 crc kubenswrapper[4787]: I0129 14:40:09.986396 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:40:09 crc kubenswrapper[4787]: E0129 14:40:09.987598 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:40:11 crc kubenswrapper[4787]: I0129 14:40:11.825912 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-545d4d4674-ks2h5_3689cb82-2282-4fd1-bd6e-bbcff6e1f8f0/cert-manager-controller/0.log"
Jan 29 14:40:12 crc kubenswrapper[4787]: I0129 14:40:12.082755 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-5545bd876-2dtz4_e6a3f740-0b80-4393-b901-8058c12d3bcc/cert-manager-cainjector/0.log"
Jan 29 14:40:12 crc kubenswrapper[4787]: I0129 14:40:12.196633 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-6888856db4-rnnjn_aa78f35d-c744-44e3-ae55-363c3f891fb9/cert-manager-webhook/0.log"
Jan 29 14:40:20 crc kubenswrapper[4787]: I0129 14:40:20.985416 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:40:20 crc kubenswrapper[4787]: E0129 14:40:20.986316 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:40:25 crc kubenswrapper[4787]: I0129 14:40:25.628441 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-wht4l_9480738d-8817-480c-8968-7107f514a967/nmstate-console-plugin/0.log"
Jan 29 14:40:25 crc kubenswrapper[4787]: I0129 14:40:25.797704 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-55sg5_f92a2cd2-6857-4c80-bfea-c78d4803e46c/nmstate-handler/0.log"
Jan 29 14:40:25 crc kubenswrapper[4787]: I0129 14:40:25.862799 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-qsb6g_55575e97-6bfe-40d8-b4b5-8f5b020ef25f/kube-rbac-proxy/0.log"
Jan 29 14:40:25 crc kubenswrapper[4787]: I0129 14:40:25.925833 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-qsb6g_55575e97-6bfe-40d8-b4b5-8f5b020ef25f/nmstate-metrics/0.log"
Jan 29 14:40:26 crc kubenswrapper[4787]: I0129 14:40:26.038847 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-mwzj8_d1ad75bd-bcbc-49f4-a0ca-3312f4cee489/nmstate-operator/0.log"
Jan 29 14:40:26 crc kubenswrapper[4787]: I0129 14:40:26.169944 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-r6pn9_5609e633-dfb8-473b-9165-437046bbf13b/nmstate-webhook/0.log"
Jan 29 14:40:34 crc kubenswrapper[4787]: I0129 14:40:34.985631 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:40:34 crc kubenswrapper[4787]: E0129 14:40:34.986443 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:40:46 crc kubenswrapper[4787]: I0129 14:40:46.985488 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:40:46 crc kubenswrapper[4787]: E0129 14:40:46.986159 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:40:55 crc kubenswrapper[4787]: I0129 14:40:55.134085 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-94cfr_b506b359-c014-4d87-b053-ec4e7fc51ba2/kube-rbac-proxy/0.log"
Jan 29 14:40:55 crc kubenswrapper[4787]: I0129 14:40:55.503154 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-94cfr_b506b359-c014-4d87-b053-ec4e7fc51ba2/controller/0.log"
Jan 29 14:40:55 crc kubenswrapper[4787]: I0129 14:40:55.586226 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-frr-files/0.log"
Jan 29 14:40:55 crc kubenswrapper[4787]: I0129 14:40:55.772735 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-metrics/0.log"
Jan 29 14:40:55 crc kubenswrapper[4787]: I0129 14:40:55.816612 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-reloader/0.log"
Jan 29 14:40:55 crc kubenswrapper[4787]: I0129 14:40:55.823413 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-frr-files/0.log"
Jan 29 14:40:55 crc kubenswrapper[4787]: I0129 14:40:55.851899 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-reloader/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.039795 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-reloader/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.042769 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-frr-files/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.132188 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-metrics/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.147490 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-metrics/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.264192 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-frr-files/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.264812 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-reloader/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.300885 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/cp-metrics/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.336036 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/controller/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.444387 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/frr-metrics/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.505526 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/kube-rbac-proxy/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.579208 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/kube-rbac-proxy-frr/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.634635 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/reloader/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.769119 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-l82t9_d4c81250-d1f0-4706-8c3f-70e69c976131/frr-k8s-webhook-server/0.log"
Jan 29 14:40:56 crc kubenswrapper[4787]: I0129 14:40:56.973892 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-757f9bfbb9-gzt8x_6758092c-146f-4462-9aea-214f3c018f71/manager/0.log"
Jan 29 14:40:57 crc kubenswrapper[4787]: I0129 14:40:57.069244 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-747fb77c56-fsqhq_43423821-b0d6-4d8a-a8cb-d03102470ff0/webhook-server/0.log"
Jan 29 14:40:57 crc kubenswrapper[4787]: I0129 14:40:57.272412 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-ntsqx_ea0fd311-2d72-463c-8f9b-2c9ba1dc8903/kube-rbac-proxy/0.log"
Jan 29 14:40:57 crc kubenswrapper[4787]: I0129 14:40:57.664607 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8nc6k_499c781e-42ec-4475-ab1e-d5204f4bdac4/frr/0.log"
Jan 29 14:40:57 crc kubenswrapper[4787]: I0129 14:40:57.729222 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-ntsqx_ea0fd311-2d72-463c-8f9b-2c9ba1dc8903/speaker/0.log"
Jan 29 14:40:59 crc kubenswrapper[4787]: I0129 14:40:59.986143 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:40:59 crc kubenswrapper[4787]: E0129 14:40:59.986622 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:41:10 crc kubenswrapper[4787]: I0129 14:41:10.637231 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n_ac56de9c-d784-426d-830c-9adfa79702a0/util/0.log"
Jan 29 14:41:10 crc kubenswrapper[4787]: I0129 14:41:10.889892 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n_ac56de9c-d784-426d-830c-9adfa79702a0/util/0.log"
Jan 29 14:41:10 crc kubenswrapper[4787]: I0129 14:41:10.953708 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n_ac56de9c-d784-426d-830c-9adfa79702a0/pull/0.log"
Jan 29 14:41:10 crc kubenswrapper[4787]: I0129 14:41:10.956100 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n_ac56de9c-d784-426d-830c-9adfa79702a0/pull/0.log"
Jan 29 14:41:10 crc kubenswrapper[4787]: I0129 14:41:10.986474 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:41:10 crc kubenswrapper[4787]: E0129 14:41:10.986718 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.124954 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n_ac56de9c-d784-426d-830c-9adfa79702a0/util/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.147131 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n_ac56de9c-d784-426d-830c-9adfa79702a0/extract/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.185074 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcx465n_ac56de9c-d784-426d-830c-9adfa79702a0/pull/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.285381 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4_07d08c2a-7015-4fca-9056-da4ee86a0d95/util/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.474939 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4_07d08c2a-7015-4fca-9056-da4ee86a0d95/pull/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.478525 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4_07d08c2a-7015-4fca-9056-da4ee86a0d95/util/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.530326 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4_07d08c2a-7015-4fca-9056-da4ee86a0d95/pull/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.668942 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4_07d08c2a-7015-4fca-9056-da4ee86a0d95/util/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.682762 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4_07d08c2a-7015-4fca-9056-da4ee86a0d95/extract/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.699029 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713z82f4_07d08c2a-7015-4fca-9056-da4ee86a0d95/pull/0.log"
Jan 29 14:41:11 crc kubenswrapper[4787]: I0129 14:41:11.848792 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb_2bed7c3e-80c2-41f8-8cb0-58f15efb7a22/util/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.047602 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb_2bed7c3e-80c2-41f8-8cb0-58f15efb7a22/util/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.082789 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb_2bed7c3e-80c2-41f8-8cb0-58f15efb7a22/pull/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.125600 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb_2bed7c3e-80c2-41f8-8cb0-58f15efb7a22/pull/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.281482 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb_2bed7c3e-80c2-41f8-8cb0-58f15efb7a22/util/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.325364 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb_2bed7c3e-80c2-41f8-8cb0-58f15efb7a22/extract/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.376813 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5ptkcb_2bed7c3e-80c2-41f8-8cb0-58f15efb7a22/pull/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.436619 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-42l7p_3ca49f9d-e677-4820-af3d-92b6b1233cc6/extract-utilities/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.685986 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-42l7p_3ca49f9d-e677-4820-af3d-92b6b1233cc6/extract-utilities/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.695679 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-42l7p_3ca49f9d-e677-4820-af3d-92b6b1233cc6/extract-content/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.698107 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-42l7p_3ca49f9d-e677-4820-af3d-92b6b1233cc6/extract-content/0.log"
Jan 29 14:41:12 crc kubenswrapper[4787]: I0129 14:41:12.867605 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-42l7p_3ca49f9d-e677-4820-af3d-92b6b1233cc6/extract-content/0.log"
Jan 29 14:41:13 crc kubenswrapper[4787]: I0129 14:41:13.153413 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-42l7p_3ca49f9d-e677-4820-af3d-92b6b1233cc6/extract-utilities/0.log"
Jan 29 14:41:13 crc kubenswrapper[4787]: I0129 14:41:13.367505 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vhgdz_50557508-a648-4ea9-982d-fe0cf3f70b3c/extract-utilities/0.log"
Jan 29 14:41:13 crc kubenswrapper[4787]: I0129 14:41:13.501799 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vhgdz_50557508-a648-4ea9-982d-fe0cf3f70b3c/extract-utilities/0.log"
Jan 29 14:41:13 crc kubenswrapper[4787]: I0129 14:41:13.585045 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vhgdz_50557508-a648-4ea9-982d-fe0cf3f70b3c/extract-content/0.log"
Jan 29 14:41:13 crc kubenswrapper[4787]: I0129 14:41:13.588369 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-42l7p_3ca49f9d-e677-4820-af3d-92b6b1233cc6/registry-server/0.log"
Jan 29 14:41:13 crc kubenswrapper[4787]: I0129 14:41:13.622018 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vhgdz_50557508-a648-4ea9-982d-fe0cf3f70b3c/extract-content/0.log"
Jan 29 14:41:13 crc kubenswrapper[4787]: I0129 14:41:13.761471 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vhgdz_50557508-a648-4ea9-982d-fe0cf3f70b3c/extract-utilities/0.log"
Jan 29 14:41:13 crc kubenswrapper[4787]: I0129 14:41:13.821593 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vhgdz_50557508-a648-4ea9-982d-fe0cf3f70b3c/extract-content/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.035375 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-kvljk_b9cf6a55-fb13-46ac-b0e6-fccf7aeee0e7/marketplace-operator/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.139371 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vjqjt_ab1da11b-1f91-4dbe-8dc5-04b11596596b/extract-utilities/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.379252 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vjqjt_ab1da11b-1f91-4dbe-8dc5-04b11596596b/extract-utilities/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.418160 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vjqjt_ab1da11b-1f91-4dbe-8dc5-04b11596596b/extract-content/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.441282 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vjqjt_ab1da11b-1f91-4dbe-8dc5-04b11596596b/extract-content/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.506677 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vhgdz_50557508-a648-4ea9-982d-fe0cf3f70b3c/registry-server/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.608234 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vjqjt_ab1da11b-1f91-4dbe-8dc5-04b11596596b/extract-utilities/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.644530 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vjqjt_ab1da11b-1f91-4dbe-8dc5-04b11596596b/extract-content/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.715034 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6s2hl_8f20d430-2c3b-4ccf-888e-430a6bbf9979/extract-utilities/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.847025 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-vjqjt_ab1da11b-1f91-4dbe-8dc5-04b11596596b/registry-server/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.884227 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6s2hl_8f20d430-2c3b-4ccf-888e-430a6bbf9979/extract-utilities/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.924132 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6s2hl_8f20d430-2c3b-4ccf-888e-430a6bbf9979/extract-content/0.log"
Jan 29 14:41:14 crc kubenswrapper[4787]: I0129 14:41:14.928518 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6s2hl_8f20d430-2c3b-4ccf-888e-430a6bbf9979/extract-content/0.log"
Jan 29 14:41:15 crc kubenswrapper[4787]: I0129 14:41:15.084428 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6s2hl_8f20d430-2c3b-4ccf-888e-430a6bbf9979/extract-utilities/0.log"
Jan 29 14:41:15 crc kubenswrapper[4787]: I0129 14:41:15.114112 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6s2hl_8f20d430-2c3b-4ccf-888e-430a6bbf9979/extract-content/0.log"
Jan 29 14:41:15 crc kubenswrapper[4787]: I0129 14:41:15.641817 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-6s2hl_8f20d430-2c3b-4ccf-888e-430a6bbf9979/registry-server/0.log"
Jan 29 14:41:23 crc kubenswrapper[4787]: I0129 14:41:23.985705 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:41:23 crc kubenswrapper[4787]: E0129 14:41:23.986389 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:41:25 crc kubenswrapper[4787]: I0129 14:41:25.183419 4787 scope.go:117] "RemoveContainer" containerID="66d11130f5eeaff598cefde7718c07b77e61b2c6fe6ee93c178be5da22a71f6e"
Jan 29 14:41:25 crc kubenswrapper[4787]: I0129 14:41:25.212200 4787 scope.go:117] "RemoveContainer" containerID="1c797d83d177cbdc3ccf48f2a9d6718b17606ade77fe1ede6cef5986d346ab53"
Jan 29 14:41:25 crc kubenswrapper[4787]: I0129 14:41:25.259035 4787 scope.go:117] "RemoveContainer" containerID="65a52c63bd64e75c9249da90bbf61a330a96d573f2c130eb1f59e5d27b5d4518"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.034962 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t8m7j"]
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.036599 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.051210 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t8m7j"]
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.096086 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4chw5\" (UniqueName: \"kubernetes.io/projected/76c73153-265d-4dd9-b55a-580cddf91db2-kube-api-access-4chw5\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.096169 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-utilities\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.096261 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-catalog-content\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.197438 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4chw5\" (UniqueName: \"kubernetes.io/projected/76c73153-265d-4dd9-b55a-580cddf91db2-kube-api-access-4chw5\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.197530 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-utilities\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.197610 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-catalog-content\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.198017 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-utilities\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.198094 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-catalog-content\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.217783 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4chw5\" (UniqueName: \"kubernetes.io/projected/76c73153-265d-4dd9-b55a-580cddf91db2-kube-api-access-4chw5\") pod \"community-operators-t8m7j\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") " pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.378337 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:32 crc kubenswrapper[4787]: I0129 14:41:32.858846 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t8m7j"]
Jan 29 14:41:33 crc kubenswrapper[4787]: I0129 14:41:33.247574 4787 generic.go:334] "Generic (PLEG): container finished" podID="76c73153-265d-4dd9-b55a-580cddf91db2" containerID="50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf" exitCode=0
Jan 29 14:41:33 crc kubenswrapper[4787]: I0129 14:41:33.247636 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8m7j" event={"ID":"76c73153-265d-4dd9-b55a-580cddf91db2","Type":"ContainerDied","Data":"50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf"}
Jan 29 14:41:33 crc kubenswrapper[4787]: I0129 14:41:33.247702 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8m7j" event={"ID":"76c73153-265d-4dd9-b55a-580cddf91db2","Type":"ContainerStarted","Data":"24d26eda61762afdde3f8c9cd606f265bff6f266e9c5d15bf7115d3fbef6b804"}
Jan 29 14:41:34 crc kubenswrapper[4787]: I0129 14:41:34.256233 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8m7j" event={"ID":"76c73153-265d-4dd9-b55a-580cddf91db2","Type":"ContainerStarted","Data":"ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496"}
Jan 29 14:41:35 crc kubenswrapper[4787]: I0129 14:41:35.279207 4787 generic.go:334] "Generic (PLEG): container finished" podID="76c73153-265d-4dd9-b55a-580cddf91db2" containerID="ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496" exitCode=0
Jan 29 14:41:35 crc kubenswrapper[4787]: I0129 14:41:35.279368 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8m7j" event={"ID":"76c73153-265d-4dd9-b55a-580cddf91db2","Type":"ContainerDied","Data":"ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496"}
Jan 29 14:41:36 crc kubenswrapper[4787]: I0129 14:41:36.291853 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8m7j" event={"ID":"76c73153-265d-4dd9-b55a-580cddf91db2","Type":"ContainerStarted","Data":"cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640"}
Jan 29 14:41:36 crc kubenswrapper[4787]: I0129 14:41:36.324410 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t8m7j" podStartSLOduration=1.8827524420000001 podStartE2EDuration="4.324385105s" podCreationTimestamp="2026-01-29 14:41:32 +0000 UTC" firstStartedPulling="2026-01-29 14:41:33.24965721 +0000 UTC m=+5132.010917486" lastFinishedPulling="2026-01-29 14:41:35.691289853 +0000 UTC m=+5134.452550149" observedRunningTime="2026-01-29 14:41:36.318231001 +0000 UTC m=+5135.079491297" watchObservedRunningTime="2026-01-29 14:41:36.324385105 +0000 UTC m=+5135.085645391"
Jan 29 14:41:36 crc kubenswrapper[4787]: I0129 14:41:36.987015 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:41:36 crc kubenswrapper[4787]: E0129 14:41:36.987186 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:41:42 crc kubenswrapper[4787]: I0129 14:41:42.379330 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:42 crc kubenswrapper[4787]: I0129 14:41:42.380147 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:42 crc kubenswrapper[4787]: I0129 14:41:42.459638 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:43 crc kubenswrapper[4787]: I0129 14:41:43.413115 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:43 crc kubenswrapper[4787]: I0129 14:41:43.474658 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t8m7j"]
Jan 29 14:41:45 crc kubenswrapper[4787]: I0129 14:41:45.359759 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t8m7j" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" containerName="registry-server" containerID="cri-o://cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640" gracePeriod=2
Jan 29 14:41:45 crc kubenswrapper[4787]: I0129 14:41:45.855849 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:45 crc kubenswrapper[4787]: I0129 14:41:45.926116 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-utilities\") pod \"76c73153-265d-4dd9-b55a-580cddf91db2\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") "
Jan 29 14:41:45 crc kubenswrapper[4787]: I0129 14:41:45.926158 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4chw5\" (UniqueName: \"kubernetes.io/projected/76c73153-265d-4dd9-b55a-580cddf91db2-kube-api-access-4chw5\") pod \"76c73153-265d-4dd9-b55a-580cddf91db2\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") "
Jan 29 14:41:45 crc kubenswrapper[4787]: I0129 14:41:45.926197 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-catalog-content\") pod \"76c73153-265d-4dd9-b55a-580cddf91db2\" (UID: \"76c73153-265d-4dd9-b55a-580cddf91db2\") "
Jan 29 14:41:45 crc kubenswrapper[4787]: I0129 14:41:45.952016 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-utilities" (OuterVolumeSpecName: "utilities") pod "76c73153-265d-4dd9-b55a-580cddf91db2" (UID: "76c73153-265d-4dd9-b55a-580cddf91db2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:41:45 crc kubenswrapper[4787]: I0129 14:41:45.958301 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76c73153-265d-4dd9-b55a-580cddf91db2-kube-api-access-4chw5" (OuterVolumeSpecName: "kube-api-access-4chw5") pod "76c73153-265d-4dd9-b55a-580cddf91db2" (UID: "76c73153-265d-4dd9-b55a-580cddf91db2"). InnerVolumeSpecName "kube-api-access-4chw5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:41:45 crc kubenswrapper[4787]: I0129 14:41:45.976072 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76c73153-265d-4dd9-b55a-580cddf91db2" (UID: "76c73153-265d-4dd9-b55a-580cddf91db2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.027500 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.027529 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c73153-265d-4dd9-b55a-580cddf91db2-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.027540 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4chw5\" (UniqueName: \"kubernetes.io/projected/76c73153-265d-4dd9-b55a-580cddf91db2-kube-api-access-4chw5\") on node \"crc\" DevicePath \"\""
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.373171 4787 generic.go:334] "Generic (PLEG): container finished" podID="76c73153-265d-4dd9-b55a-580cddf91db2" containerID="cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640" exitCode=0
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.373214 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8m7j" event={"ID":"76c73153-265d-4dd9-b55a-580cddf91db2","Type":"ContainerDied","Data":"cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640"}
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.373239 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t8m7j" event={"ID":"76c73153-265d-4dd9-b55a-580cddf91db2","Type":"ContainerDied","Data":"24d26eda61762afdde3f8c9cd606f265bff6f266e9c5d15bf7115d3fbef6b804"}
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.373253 4787 scope.go:117] "RemoveContainer" containerID="cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.373376 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t8m7j"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.408858 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t8m7j"]
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.412108 4787 scope.go:117] "RemoveContainer" containerID="ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.449777 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t8m7j"]
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.469636 4787 scope.go:117] "RemoveContainer" containerID="50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.525729 4787 scope.go:117] "RemoveContainer" containerID="cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640"
Jan 29 14:41:46 crc kubenswrapper[4787]: E0129 14:41:46.526363 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640\": container with ID starting with cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640 not found: ID does not exist" containerID="cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.526439 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640"} err="failed to get container status \"cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640\": rpc error: code = NotFound desc = could not find container \"cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640\": container with ID starting with cff67e0994d8ab8724ab72bccea34cc6dfe935f7bd1b54feccce7848699fb640 not found: ID does not exist"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.526550 4787 scope.go:117] "RemoveContainer" containerID="ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496"
Jan 29 14:41:46 crc kubenswrapper[4787]: E0129 14:41:46.527668 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496\": container with ID starting with ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496 not found: ID does not exist" containerID="ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.527750 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496"} err="failed to get container status \"ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496\": rpc error: code = NotFound desc = could not find container \"ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496\": container with ID starting with ded72aa384b004e209bd1a067df8bc3f54856a2079e34486a4a515c01dc0d496 not found: ID does not exist"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.527804 4787 scope.go:117] "RemoveContainer" containerID="50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf"
Jan 29 14:41:46 crc kubenswrapper[4787]: E0129 14:41:46.528329 4787 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf\": container with ID starting with 50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf not found: ID does not exist" containerID="50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf"
Jan 29 14:41:46 crc kubenswrapper[4787]: I0129 14:41:46.528367 4787 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf"} err="failed to get container status \"50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf\": rpc error: code = NotFound desc = could not find container \"50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf\": container with ID starting with 50f7b6596d53d223fac40c7a0aca59fb48c560073cce4c064d5f9d4f67a2bccf not found: ID does not exist"
Jan 29 14:41:47 crc kubenswrapper[4787]: I0129 14:41:47.997691 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" path="/var/lib/kubelet/pods/76c73153-265d-4dd9-b55a-580cddf91db2/volumes"
Jan 29 14:41:49 crc kubenswrapper[4787]: I0129 14:41:49.992981 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:41:49 crc kubenswrapper[4787]: E0129 14:41:49.993898 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:42:00 crc kubenswrapper[4787]: I0129 14:42:00.985251 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:42:00 crc kubenswrapper[4787]: E0129 14:42:00.986009 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:42:12 crc kubenswrapper[4787]: I0129 14:42:12.986181 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:42:12 crc kubenswrapper[4787]: E0129 14:42:12.986983 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:42:26 crc kubenswrapper[4787]: I0129 14:42:26.711233 4787 generic.go:334] "Generic (PLEG): container finished" podID="276c414e-9383-4013-be98-925d9d641929" containerID="9e2ad90150cccb78023c4b7fc1df6226bbd5c29f3ba07a145a6cbd0775e1d756" exitCode=0
Jan 29 14:42:26 crc kubenswrapper[4787]: I0129 14:42:26.711480 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4qk5d/must-gather-d6rvj" event={"ID":"276c414e-9383-4013-be98-925d9d641929","Type":"ContainerDied","Data":"9e2ad90150cccb78023c4b7fc1df6226bbd5c29f3ba07a145a6cbd0775e1d756"}
Jan 29 14:42:26 crc kubenswrapper[4787]: I0129 14:42:26.713561 4787 scope.go:117] "RemoveContainer" containerID="9e2ad90150cccb78023c4b7fc1df6226bbd5c29f3ba07a145a6cbd0775e1d756"
Jan 29 14:42:27 crc kubenswrapper[4787]: I0129 14:42:27.576065 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4qk5d_must-gather-d6rvj_276c414e-9383-4013-be98-925d9d641929/gather/0.log"
Jan 29 14:42:27 crc kubenswrapper[4787]: I0129 14:42:27.985904 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:42:27 crc kubenswrapper[4787]: E0129 14:42:27.986575 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:42:34 crc kubenswrapper[4787]: I0129 14:42:34.387075 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-4qk5d/must-gather-d6rvj"]
Jan 29 14:42:34 crc kubenswrapper[4787]: I0129 14:42:34.388164 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-4qk5d/must-gather-d6rvj" podUID="276c414e-9383-4013-be98-925d9d641929" containerName="copy" containerID="cri-o://0925e42b94012a650077305967aebbcb5f643c0f4591afe5d2c7d63229de8950" gracePeriod=2
Jan 29 14:42:34 crc kubenswrapper[4787]: I0129 14:42:34.397889 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-4qk5d/must-gather-d6rvj"]
Jan 29 14:42:34 crc kubenswrapper[4787]: I0129 14:42:34.781196 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4qk5d_must-gather-d6rvj_276c414e-9383-4013-be98-925d9d641929/copy/0.log"
Jan 29 14:42:34 crc kubenswrapper[4787]: I0129 14:42:34.781930 4787 generic.go:334] "Generic (PLEG): container finished" podID="276c414e-9383-4013-be98-925d9d641929" containerID="0925e42b94012a650077305967aebbcb5f643c0f4591afe5d2c7d63229de8950" exitCode=143
Jan 29 14:42:34 crc kubenswrapper[4787]: I0129 14:42:34.835260 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4qk5d_must-gather-d6rvj_276c414e-9383-4013-be98-925d9d641929/copy/0.log"
Jan 29 14:42:34 crc kubenswrapper[4787]: I0129 14:42:34.835850 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4qk5d/must-gather-d6rvj"
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.033036 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/276c414e-9383-4013-be98-925d9d641929-must-gather-output\") pod \"276c414e-9383-4013-be98-925d9d641929\" (UID: \"276c414e-9383-4013-be98-925d9d641929\") "
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.033138 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjjhv\" (UniqueName: \"kubernetes.io/projected/276c414e-9383-4013-be98-925d9d641929-kube-api-access-kjjhv\") pod \"276c414e-9383-4013-be98-925d9d641929\" (UID: \"276c414e-9383-4013-be98-925d9d641929\") "
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.040234 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/276c414e-9383-4013-be98-925d9d641929-kube-api-access-kjjhv" (OuterVolumeSpecName: "kube-api-access-kjjhv") pod "276c414e-9383-4013-be98-925d9d641929" (UID: "276c414e-9383-4013-be98-925d9d641929"). InnerVolumeSpecName "kube-api-access-kjjhv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.133563 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/276c414e-9383-4013-be98-925d9d641929-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "276c414e-9383-4013-be98-925d9d641929" (UID: "276c414e-9383-4013-be98-925d9d641929"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.135628 4787 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/276c414e-9383-4013-be98-925d9d641929-must-gather-output\") on node \"crc\" DevicePath \"\""
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.135669 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjjhv\" (UniqueName: \"kubernetes.io/projected/276c414e-9383-4013-be98-925d9d641929-kube-api-access-kjjhv\") on node \"crc\" DevicePath \"\""
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.790849 4787 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4qk5d_must-gather-d6rvj_276c414e-9383-4013-be98-925d9d641929/copy/0.log"
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.791338 4787 scope.go:117] "RemoveContainer" containerID="0925e42b94012a650077305967aebbcb5f643c0f4591afe5d2c7d63229de8950"
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.791410 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4qk5d/must-gather-d6rvj"
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.823000 4787 scope.go:117] "RemoveContainer" containerID="9e2ad90150cccb78023c4b7fc1df6226bbd5c29f3ba07a145a6cbd0775e1d756"
Jan 29 14:42:35 crc kubenswrapper[4787]: I0129 14:42:35.994097 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="276c414e-9383-4013-be98-925d9d641929" path="/var/lib/kubelet/pods/276c414e-9383-4013-be98-925d9d641929/volumes"
Jan 29 14:42:42 crc kubenswrapper[4787]: I0129 14:42:42.986210 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:42:42 crc kubenswrapper[4787]: E0129 14:42:42.987292 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.746489 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jqzwp"]
Jan 29 14:42:50 crc kubenswrapper[4787]: E0129 14:42:50.747505 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276c414e-9383-4013-be98-925d9d641929" containerName="copy"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.747526 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="276c414e-9383-4013-be98-925d9d641929" containerName="copy"
Jan 29 14:42:50 crc kubenswrapper[4787]: E0129 14:42:50.747542 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" containerName="extract-utilities"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.747552 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" containerName="extract-utilities"
Jan 29 14:42:50 crc kubenswrapper[4787]: E0129 14:42:50.747566 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" containerName="extract-content"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.747575 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" containerName="extract-content"
Jan 29 14:42:50 crc kubenswrapper[4787]: E0129 14:42:50.747611 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" containerName="registry-server"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.747623 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" containerName="registry-server"
Jan 29 14:42:50 crc kubenswrapper[4787]: E0129 14:42:50.747650 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276c414e-9383-4013-be98-925d9d641929" containerName="gather"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.747661 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="276c414e-9383-4013-be98-925d9d641929" containerName="gather"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.747872 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="276c414e-9383-4013-be98-925d9d641929" containerName="gather"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.747908 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="76c73153-265d-4dd9-b55a-580cddf91db2" containerName="registry-server"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.747923 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="276c414e-9383-4013-be98-925d9d641929" containerName="copy"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.751015 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.758054 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jqzwp"]
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.921005 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-catalog-content\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.921073 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-utilities\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:50 crc kubenswrapper[4787]: I0129 14:42:50.921141 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgfzd\" (UniqueName: \"kubernetes.io/projected/13f88697-a0c5-49cf-80f0-d3534b115e84-kube-api-access-jgfzd\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.021913 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgfzd\" (UniqueName: \"kubernetes.io/projected/13f88697-a0c5-49cf-80f0-d3534b115e84-kube-api-access-jgfzd\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.021987 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-catalog-content\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.022029 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-utilities\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.022442 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-utilities\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.022718 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-catalog-content\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.041323 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgfzd\" (UniqueName: \"kubernetes.io/projected/13f88697-a0c5-49cf-80f0-d3534b115e84-kube-api-access-jgfzd\") pod \"certified-operators-jqzwp\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") " pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.129341 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.625121 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jqzwp"]
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.932855 4787 generic.go:334] "Generic (PLEG): container finished" podID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerID="dc56479261835e70d70fbd884b76a51e948926abdacba9719d9df76c3af88eff" exitCode=0
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.933106 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jqzwp" event={"ID":"13f88697-a0c5-49cf-80f0-d3534b115e84","Type":"ContainerDied","Data":"dc56479261835e70d70fbd884b76a51e948926abdacba9719d9df76c3af88eff"}
Jan 29 14:42:51 crc kubenswrapper[4787]: I0129 14:42:51.933218 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jqzwp" event={"ID":"13f88697-a0c5-49cf-80f0-d3534b115e84","Type":"ContainerStarted","Data":"314e35c12b8571ecb4145adad292990101e3fecfb3878b1659b07ae48d4a6485"}
Jan 29 14:42:52 crc kubenswrapper[4787]: I0129 14:42:52.942259 4787 generic.go:334] "Generic (PLEG): container finished" podID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerID="8ae9fc4cd68d943b5b042a30804ebb446051af2fd08f6b625699d7a7935af1b2" exitCode=0
Jan 29 14:42:52 crc kubenswrapper[4787]: I0129 14:42:52.942363 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jqzwp" event={"ID":"13f88697-a0c5-49cf-80f0-d3534b115e84","Type":"ContainerDied","Data":"8ae9fc4cd68d943b5b042a30804ebb446051af2fd08f6b625699d7a7935af1b2"}
Jan 29 14:42:53 crc kubenswrapper[4787]: I0129 14:42:53.950818 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jqzwp" event={"ID":"13f88697-a0c5-49cf-80f0-d3534b115e84","Type":"ContainerStarted","Data":"b6a75127b19e08b5c8b1bff063e58bbe95f5c52adc06f0ca2ccd498aaaed9c84"}
Jan 29 14:42:53 crc kubenswrapper[4787]: I0129 14:42:53.975589 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jqzwp" podStartSLOduration=2.546731988 podStartE2EDuration="3.975567433s" podCreationTimestamp="2026-01-29 14:42:50 +0000 UTC" firstStartedPulling="2026-01-29 14:42:51.934524523 +0000 UTC m=+5210.695784799" lastFinishedPulling="2026-01-29 14:42:53.363359958 +0000 UTC m=+5212.124620244" observedRunningTime="2026-01-29 14:42:53.969136041 +0000 UTC m=+5212.730396337" watchObservedRunningTime="2026-01-29 14:42:53.975567433 +0000 UTC m=+5212.736827719"
Jan 29 14:42:55 crc kubenswrapper[4787]: I0129 14:42:55.986605 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb" Jan 29 14:42:55 crc kubenswrapper[4787]: E0129 14:42:55.988434 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db" Jan 29 14:43:01 crc kubenswrapper[4787]: I0129 14:43:01.130244 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jqzwp" Jan 29 14:43:01 crc kubenswrapper[4787]: I0129 14:43:01.130620 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jqzwp" Jan 29 14:43:01 crc kubenswrapper[4787]: I0129 14:43:01.171821 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jqzwp" Jan 29 14:43:02 crc kubenswrapper[4787]: I0129 14:43:02.059175 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jqzwp" Jan 29 14:43:02 crc kubenswrapper[4787]: I0129 14:43:02.112803 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jqzwp"] Jan 29 14:43:04 crc kubenswrapper[4787]: I0129 14:43:04.022136 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jqzwp" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerName="registry-server" containerID="cri-o://b6a75127b19e08b5c8b1bff063e58bbe95f5c52adc06f0ca2ccd498aaaed9c84" gracePeriod=2 Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.035237 4787 generic.go:334] "Generic (PLEG): container finished" podID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerID="b6a75127b19e08b5c8b1bff063e58bbe95f5c52adc06f0ca2ccd498aaaed9c84" exitCode=0 Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.035330 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jqzwp" event={"ID":"13f88697-a0c5-49cf-80f0-d3534b115e84","Type":"ContainerDied","Data":"b6a75127b19e08b5c8b1bff063e58bbe95f5c52adc06f0ca2ccd498aaaed9c84"} Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.035564 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jqzwp" event={"ID":"13f88697-a0c5-49cf-80f0-d3534b115e84","Type":"ContainerDied","Data":"314e35c12b8571ecb4145adad292990101e3fecfb3878b1659b07ae48d4a6485"} Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.035581 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="314e35c12b8571ecb4145adad292990101e3fecfb3878b1659b07ae48d4a6485" Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.054425 4787 util.go:48] "No ready sandbox for pod can be found. 
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.054425 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.167122 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-utilities\") pod \"13f88697-a0c5-49cf-80f0-d3534b115e84\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") "
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.167195 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgfzd\" (UniqueName: \"kubernetes.io/projected/13f88697-a0c5-49cf-80f0-d3534b115e84-kube-api-access-jgfzd\") pod \"13f88697-a0c5-49cf-80f0-d3534b115e84\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") "
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.167344 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-catalog-content\") pod \"13f88697-a0c5-49cf-80f0-d3534b115e84\" (UID: \"13f88697-a0c5-49cf-80f0-d3534b115e84\") "
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.168250 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-utilities" (OuterVolumeSpecName: "utilities") pod "13f88697-a0c5-49cf-80f0-d3534b115e84" (UID: "13f88697-a0c5-49cf-80f0-d3534b115e84"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.183635 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13f88697-a0c5-49cf-80f0-d3534b115e84-kube-api-access-jgfzd" (OuterVolumeSpecName: "kube-api-access-jgfzd") pod "13f88697-a0c5-49cf-80f0-d3534b115e84" (UID: "13f88697-a0c5-49cf-80f0-d3534b115e84"). InnerVolumeSpecName "kube-api-access-jgfzd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.216295 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13f88697-a0c5-49cf-80f0-d3534b115e84" (UID: "13f88697-a0c5-49cf-80f0-d3534b115e84"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.268791 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.268827 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13f88697-a0c5-49cf-80f0-d3534b115e84-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 14:43:05 crc kubenswrapper[4787]: I0129 14:43:05.268842 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgfzd\" (UniqueName: \"kubernetes.io/projected/13f88697-a0c5-49cf-80f0-d3534b115e84-kube-api-access-jgfzd\") on node \"crc\" DevicePath \"\""
Jan 29 14:43:06 crc kubenswrapper[4787]: I0129 14:43:06.043246 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jqzwp"
Jan 29 14:43:06 crc kubenswrapper[4787]: I0129 14:43:06.078393 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jqzwp"]
Jan 29 14:43:06 crc kubenswrapper[4787]: I0129 14:43:06.082853 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jqzwp"]
Jan 29 14:43:07 crc kubenswrapper[4787]: I0129 14:43:07.998950 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" path="/var/lib/kubelet/pods/13f88697-a0c5-49cf-80f0-d3534b115e84/volumes"
Jan 29 14:43:09 crc kubenswrapper[4787]: I0129 14:43:09.985598 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:43:09 crc kubenswrapper[4787]: E0129 14:43:09.985951 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:43:20 crc kubenswrapper[4787]: I0129 14:43:20.986274 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:43:20 crc kubenswrapper[4787]: E0129 14:43:20.987237 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:43:32 crc kubenswrapper[4787]: I0129 14:43:32.986686 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:43:32 crc kubenswrapper[4787]: E0129 14:43:32.988307 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:43:45 crc kubenswrapper[4787]: I0129 14:43:45.985972 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
Jan 29 14:43:45 crc kubenswrapper[4787]: E0129 14:43:45.987133 4787 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-q79sn_openshift-machine-config-operator(6311862b-6ca2-4dba-85e0-6829dd45c2db)\"" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" podUID="6311862b-6ca2-4dba-85e0-6829dd45c2db"
Jan 29 14:44:00 crc kubenswrapper[4787]: I0129 14:44:00.986211 4787 scope.go:117] "RemoveContainer" containerID="9db764c126b33c1b4201fe2d156b46d034c0c5a6667e448ee4fdde543574f2eb"
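machine-config-daemon is sitting in CrashLoopBackOff here: the sync loop re-evaluates the pod every few seconds, and each pass logs the RemoveContainer/"Error syncing pod" pair while the restart gate is still closed. The kubelet's restart back-off doubles from a short initial delay up to the 5m cap quoted in the message ("back-off 5m0s"), which is consistent with the container finally restarting at 14:44:01 below. A sketch of that schedule; the 10s initial step and 5m cap are the upstream kubelet defaults, stated here as an assumption rather than something this log records:

package main

import (
	"fmt"
	"time"
)

// The doubling-with-cap restart gate implied by "back-off 5m0s restarting
// failed container". The periodic sync retries seen every ~10-15s in the
// log only re-check the gate; they do not reset it.
func main() {
	const (
		initial  = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	delay := initial
	for attempt := 1; delay < maxDelay; attempt++ {
		fmt.Printf("restart attempt %d gated for %v\n", attempt, delay)
		delay *= 2
	}
	fmt.Printf("subsequent attempts gated for %v (the cap quoted in the log)\n", maxDelay)
}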
Jan 29 14:44:01 crc kubenswrapper[4787]: I0129 14:44:01.524261 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-q79sn" event={"ID":"6311862b-6ca2-4dba-85e0-6829dd45c2db","Type":"ContainerStarted","Data":"8b57391d0d5bcf4d8f1b0a3b74d382100eecbb9970544aa1653686393f2aa3eb"}
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.857271 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bwjnx"]
Jan 29 14:44:38 crc kubenswrapper[4787]: E0129 14:44:38.858303 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerName="extract-content"
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.858317 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerName="extract-content"
Jan 29 14:44:38 crc kubenswrapper[4787]: E0129 14:44:38.858362 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerName="registry-server"
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.858370 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerName="registry-server"
Jan 29 14:44:38 crc kubenswrapper[4787]: E0129 14:44:38.858382 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerName="extract-utilities"
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.858389 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerName="extract-utilities"
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.858560 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="13f88697-a0c5-49cf-80f0-d3534b115e84" containerName="registry-server"
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.859484 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.877737 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bwjnx"]
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.935637 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-utilities\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.935733 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-catalog-content\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:38 crc kubenswrapper[4787]: I0129 14:44:38.935764 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zwwp\" (UniqueName: \"kubernetes.io/projected/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-kube-api-access-4zwwp\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.036930 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-utilities\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.037041 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-catalog-content\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.037080 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zwwp\" (UniqueName: \"kubernetes.io/projected/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-kube-api-access-4zwwp\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.038304 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-catalog-content\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.038806 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-utilities\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.066073 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zwwp\" (UniqueName: \"kubernetes.io/projected/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-kube-api-access-4zwwp\") pod \"redhat-operators-bwjnx\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") " pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.184776 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.624860 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bwjnx"]
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.887797 4787 generic.go:334] "Generic (PLEG): container finished" podID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerID="fc32e562165cdb8e1ef434feb2fc244a154b8ce02fdda564eed5c25cb1d35e44" exitCode=0
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.887847 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwjnx" event={"ID":"85c8f028-e94b-4e3b-b9fc-429a9388c8ea","Type":"ContainerDied","Data":"fc32e562165cdb8e1ef434feb2fc244a154b8ce02fdda564eed5c25cb1d35e44"}
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.887900 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwjnx" event={"ID":"85c8f028-e94b-4e3b-b9fc-429a9388c8ea","Type":"ContainerStarted","Data":"c85e21af9f1adc8f97fff39f9dcfedbb3ad38137733d37d2da5e005506e042bc"}
Jan 29 14:44:39 crc kubenswrapper[4787]: I0129 14:44:39.890086 4787 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 29 14:44:41 crc kubenswrapper[4787]: I0129 14:44:41.906258 4787 generic.go:334] "Generic (PLEG): container finished" podID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerID="d14ea13d18963cb9747d2f17336944e193967556dde5279bb0b03a9b720ea285" exitCode=0
Jan 29 14:44:41 crc kubenswrapper[4787]: I0129 14:44:41.906383 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwjnx" event={"ID":"85c8f028-e94b-4e3b-b9fc-429a9388c8ea","Type":"ContainerDied","Data":"d14ea13d18963cb9747d2f17336944e193967556dde5279bb0b03a9b720ea285"}
Jan 29 14:44:42 crc kubenswrapper[4787]: I0129 14:44:42.918469 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwjnx" event={"ID":"85c8f028-e94b-4e3b-b9fc-429a9388c8ea","Type":"ContainerStarted","Data":"0417abf34bd049a20325397e3a573cf9ca369635862e5fab9eb666d2eb46f725"}
Jan 29 14:44:49 crc kubenswrapper[4787]: I0129 14:44:49.185008 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:49 crc kubenswrapper[4787]: I0129 14:44:49.185480 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:49 crc kubenswrapper[4787]: I0129 14:44:49.230574 4787 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bwjnx"
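Each catalog pod above goes through the same volume-manager sequence: operationExecutor.VerifyControllerAttachedVolume, then MountVolume, then a MountVolume.SetUp success per volume. For the emptyDir and projected volumes these pods use, there is no controller attach step to wait for, so verification is satisfied immediately and SetUp does all the work. A toy desired-state/actual-state reconcile loop in the shape of those records; the names and types here are illustrative, not the kubelet's own:

package main

import "fmt"

// Toy reconcile: walk the desired volumes and mount anything missing from
// the actual state of the world, mirroring the record sequence above.
type volume struct{ name, plugin string }

func main() {
	desired := []volume{
		{"utilities", "kubernetes.io/empty-dir"},
		{"catalog-content", "kubernetes.io/empty-dir"},
		{"kube-api-access-4zwwp", "kubernetes.io/projected"},
	}
	mounted := map[string]bool{} // actual state of the world

	for _, v := range desired {
		// empty-dir and projected volumes are node-local: nothing to attach,
		// so the "VerifyControllerAttachedVolume" step passes immediately.
		fmt.Printf("VerifyControllerAttachedVolume started for %q (%s)\n", v.name, v.plugin)
		if !mounted[v.name] {
			fmt.Printf("MountVolume started for %q\n", v.name)
			mounted[v.name] = true // stand-in for the plugin's SetUp work
			fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.name)
		}
	}
}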
Jan 29 14:44:49 crc kubenswrapper[4787]: I0129 14:44:49.249715 4787 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bwjnx" podStartSLOduration=8.805967115 podStartE2EDuration="11.249699437s" podCreationTimestamp="2026-01-29 14:44:38 +0000 UTC" firstStartedPulling="2026-01-29 14:44:39.88979401 +0000 UTC m=+5318.651054296" lastFinishedPulling="2026-01-29 14:44:42.333526342 +0000 UTC m=+5321.094786618" observedRunningTime="2026-01-29 14:44:42.937759411 +0000 UTC m=+5321.699019687" watchObservedRunningTime="2026-01-29 14:44:49.249699437 +0000 UTC m=+5328.010959713"
Jan 29 14:44:50 crc kubenswrapper[4787]: I0129 14:44:50.017212 4787 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:50 crc kubenswrapper[4787]: I0129 14:44:50.068920 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bwjnx"]
Jan 29 14:44:51 crc kubenswrapper[4787]: I0129 14:44:51.993497 4787 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bwjnx" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerName="registry-server" containerID="cri-o://0417abf34bd049a20325397e3a573cf9ca369635862e5fab9eb666d2eb46f725" gracePeriod=2
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.001739 4787 generic.go:334] "Generic (PLEG): container finished" podID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerID="0417abf34bd049a20325397e3a573cf9ca369635862e5fab9eb666d2eb46f725" exitCode=0
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.002046 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwjnx" event={"ID":"85c8f028-e94b-4e3b-b9fc-429a9388c8ea","Type":"ContainerDied","Data":"0417abf34bd049a20325397e3a573cf9ca369635862e5fab9eb666d2eb46f725"}
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.084473 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.238572 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-catalog-content\") pod \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") "
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.238656 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zwwp\" (UniqueName: \"kubernetes.io/projected/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-kube-api-access-4zwwp\") pod \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") "
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.238725 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-utilities\") pod \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\" (UID: \"85c8f028-e94b-4e3b-b9fc-429a9388c8ea\") "
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.239693 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-utilities" (OuterVolumeSpecName: "utilities") pod "85c8f028-e94b-4e3b-b9fc-429a9388c8ea" (UID: "85c8f028-e94b-4e3b-b9fc-429a9388c8ea"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.243875 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-kube-api-access-4zwwp" (OuterVolumeSpecName: "kube-api-access-4zwwp") pod "85c8f028-e94b-4e3b-b9fc-429a9388c8ea" (UID: "85c8f028-e94b-4e3b-b9fc-429a9388c8ea"). InnerVolumeSpecName "kube-api-access-4zwwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.340037 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zwwp\" (UniqueName: \"kubernetes.io/projected/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-kube-api-access-4zwwp\") on node \"crc\" DevicePath \"\""
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.340079 4787 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-utilities\") on node \"crc\" DevicePath \"\""
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.355420 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85c8f028-e94b-4e3b-b9fc-429a9388c8ea" (UID: "85c8f028-e94b-4e3b-b9fc-429a9388c8ea"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 29 14:44:53 crc kubenswrapper[4787]: I0129 14:44:53.440799 4787 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85c8f028-e94b-4e3b-b9fc-429a9388c8ea-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 29 14:44:54 crc kubenswrapper[4787]: I0129 14:44:54.013245 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwjnx" event={"ID":"85c8f028-e94b-4e3b-b9fc-429a9388c8ea","Type":"ContainerDied","Data":"c85e21af9f1adc8f97fff39f9dcfedbb3ad38137733d37d2da5e005506e042bc"}
Jan 29 14:44:54 crc kubenswrapper[4787]: I0129 14:44:54.013311 4787 scope.go:117] "RemoveContainer" containerID="0417abf34bd049a20325397e3a573cf9ca369635862e5fab9eb666d2eb46f725"
Jan 29 14:44:54 crc kubenswrapper[4787]: I0129 14:44:54.013327 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bwjnx"
Jan 29 14:44:54 crc kubenswrapper[4787]: I0129 14:44:54.036163 4787 scope.go:117] "RemoveContainer" containerID="d14ea13d18963cb9747d2f17336944e193967556dde5279bb0b03a9b720ea285"
Jan 29 14:44:54 crc kubenswrapper[4787]: I0129 14:44:54.038984 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bwjnx"]
Jan 29 14:44:54 crc kubenswrapper[4787]: I0129 14:44:54.047323 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bwjnx"]
Jan 29 14:44:54 crc kubenswrapper[4787]: I0129 14:44:54.058024 4787 scope.go:117] "RemoveContainer" containerID="fc32e562165cdb8e1ef434feb2fc244a154b8ce02fdda564eed5c25cb1d35e44"
Jan 29 14:44:55 crc kubenswrapper[4787]: I0129 14:44:55.999408 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" path="/var/lib/kubelet/pods/85c8f028-e94b-4e3b-b9fc-429a9388c8ea/volumes"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.151004 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"]
Jan 29 14:45:00 crc kubenswrapper[4787]: E0129 14:45:00.151922 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerName="extract-content"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.151941 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerName="extract-content"
Jan 29 14:45:00 crc kubenswrapper[4787]: E0129 14:45:00.151963 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerName="extract-utilities"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.151971 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerName="extract-utilities"
Jan 29 14:45:00 crc kubenswrapper[4787]: E0129 14:45:00.151999 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerName="registry-server"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.152008 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerName="registry-server"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.152207 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="85c8f028-e94b-4e3b-b9fc-429a9388c8ea" containerName="registry-server"
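The RemoveStaleState burst above is triggered by admission of a new pod: the CPU and memory managers sweep their checkpointed per-container resource assignments and discard entries belonging to pods that no longer exist, here the three containers of the catalog pod deleted moments earlier. The E-level lines simply flag that state was found which should already have been cleaned up. A toy version keyed the same way, by pod UID and container name; the types are illustrative:

package main

import "fmt"

// Toy RemoveStaleState pass: drop assignments whose pod is no longer active.
type key struct{ podUID, container string }

func removeStaleState(assignments map[key]string, active map[string]bool) {
	for k := range assignments {
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %s\n", k.container, k.podUID)
			delete(assignments, k) // "Deleted CPUSet assignment"
		}
	}
}

func main() {
	assignments := map[key]string{
		{"85c8f028", "extract-utilities"}: "cpus 0-3",
		{"85c8f028", "extract-content"}:   "cpus 0-3",
		{"85c8f028", "registry-server"}:   "cpus 0-3",
	}
	// The newly admitted pod is active; the deleted catalog pod is not.
	removeStaleState(assignments, map[string]bool{"cbe5d689": true})
	fmt.Println("remaining assignments:", len(assignments))
}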
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.153082 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.157326 4787 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.163513 4787 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.167301 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"]
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.245155 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cbe5d689-12a9-43d7-b930-2e7373be36f5-config-volume\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.245243 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knkpr\" (UniqueName: \"kubernetes.io/projected/cbe5d689-12a9-43d7-b930-2e7373be36f5-kube-api-access-knkpr\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.245270 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cbe5d689-12a9-43d7-b930-2e7373be36f5-secret-volume\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.346707 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knkpr\" (UniqueName: \"kubernetes.io/projected/cbe5d689-12a9-43d7-b930-2e7373be36f5-kube-api-access-knkpr\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.346773 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cbe5d689-12a9-43d7-b930-2e7373be36f5-secret-volume\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.346827 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cbe5d689-12a9-43d7-b930-2e7373be36f5-config-volume\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.347688 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cbe5d689-12a9-43d7-b930-2e7373be36f5-config-volume\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.359762 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cbe5d689-12a9-43d7-b930-2e7373be36f5-secret-volume\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.364289 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knkpr\" (UniqueName: \"kubernetes.io/projected/cbe5d689-12a9-43d7-b930-2e7373be36f5-kube-api-access-knkpr\") pod \"collect-profiles-29494965-2kfpp\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.478553 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:00 crc kubenswrapper[4787]: I0129 14:45:00.922542 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"]
Jan 29 14:45:01 crc kubenswrapper[4787]: I0129 14:45:01.088739 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp" event={"ID":"cbe5d689-12a9-43d7-b930-2e7373be36f5","Type":"ContainerStarted","Data":"6931a1a0ebea71eb58ad53828d290dcc08cc7dc8d57ef1bac75618ad1d312b8b"}
Jan 29 14:45:02 crc kubenswrapper[4787]: I0129 14:45:02.097338 4787 generic.go:334] "Generic (PLEG): container finished" podID="cbe5d689-12a9-43d7-b930-2e7373be36f5" containerID="215a9c7b0bdb08f46db967d22e192d6612e79b0281f1e8c2590a9b0a284dffb4" exitCode=0
Jan 29 14:45:02 crc kubenswrapper[4787]: I0129 14:45:02.097411 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp" event={"ID":"cbe5d689-12a9-43d7-b930-2e7373be36f5","Type":"ContainerDied","Data":"215a9c7b0bdb08f46db967d22e192d6612e79b0281f1e8c2590a9b0a284dffb4"}
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.385962 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.417272 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knkpr\" (UniqueName: \"kubernetes.io/projected/cbe5d689-12a9-43d7-b930-2e7373be36f5-kube-api-access-knkpr\") pod \"cbe5d689-12a9-43d7-b930-2e7373be36f5\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") "
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.417361 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cbe5d689-12a9-43d7-b930-2e7373be36f5-config-volume\") pod \"cbe5d689-12a9-43d7-b930-2e7373be36f5\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") "
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.417514 4787 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cbe5d689-12a9-43d7-b930-2e7373be36f5-secret-volume\") pod \"cbe5d689-12a9-43d7-b930-2e7373be36f5\" (UID: \"cbe5d689-12a9-43d7-b930-2e7373be36f5\") "
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.417851 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbe5d689-12a9-43d7-b930-2e7373be36f5-config-volume" (OuterVolumeSpecName: "config-volume") pod "cbe5d689-12a9-43d7-b930-2e7373be36f5" (UID: "cbe5d689-12a9-43d7-b930-2e7373be36f5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.423335 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbe5d689-12a9-43d7-b930-2e7373be36f5-kube-api-access-knkpr" (OuterVolumeSpecName: "kube-api-access-knkpr") pod "cbe5d689-12a9-43d7-b930-2e7373be36f5" (UID: "cbe5d689-12a9-43d7-b930-2e7373be36f5"). InnerVolumeSpecName "kube-api-access-knkpr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.423715 4787 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbe5d689-12a9-43d7-b930-2e7373be36f5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cbe5d689-12a9-43d7-b930-2e7373be36f5" (UID: "cbe5d689-12a9-43d7-b930-2e7373be36f5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
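Unmount mirrors mount: operationExecutor.UnmountVolume starts for each volume, the plugin's TearDown runs, and only then does the reconciler report "Volume detached ... DevicePath \"\"", which in turn unblocks removal of the per-pod volumes directory (the "Cleaned up orphaned pod volumes dir" records). A toy sketch of that reverse pass, with illustrative types again:

package main

import "fmt"

// Mirror of the mount sketch earlier: on pod deletion the reconciler walks
// volumes that are mounted but no longer desired.
func main() {
	mounted := []string{"kube-api-access-knkpr", "config-volume", "secret-volume"}
	desired := map[string]bool{} // pod deleted: nothing is desired any more

	for _, name := range mounted {
		if desired[name] {
			continue
		}
		fmt.Printf("UnmountVolume started for %q\n", name)
		// The plugin's TearDown would run here ("UnmountVolume.TearDown succeeded").
		fmt.Printf("Volume detached for %q on node \"crc\"\n", name)
	}
	// Only once every volume is detached may the kubelet remove
	// /var/lib/kubelet/pods/<uid>/volumes ("Cleaned up orphaned pod volumes dir").
}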
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.519361 4787 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knkpr\" (UniqueName: \"kubernetes.io/projected/cbe5d689-12a9-43d7-b930-2e7373be36f5-kube-api-access-knkpr\") on node \"crc\" DevicePath \"\""
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.519399 4787 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cbe5d689-12a9-43d7-b930-2e7373be36f5-config-volume\") on node \"crc\" DevicePath \"\""
Jan 29 14:45:03 crc kubenswrapper[4787]: I0129 14:45:03.519413 4787 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cbe5d689-12a9-43d7-b930-2e7373be36f5-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 29 14:45:04 crc kubenswrapper[4787]: I0129 14:45:04.113633 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp" event={"ID":"cbe5d689-12a9-43d7-b930-2e7373be36f5","Type":"ContainerDied","Data":"6931a1a0ebea71eb58ad53828d290dcc08cc7dc8d57ef1bac75618ad1d312b8b"}
Jan 29 14:45:04 crc kubenswrapper[4787]: I0129 14:45:04.113991 4787 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6931a1a0ebea71eb58ad53828d290dcc08cc7dc8d57ef1bac75618ad1d312b8b"
Jan 29 14:45:04 crc kubenswrapper[4787]: I0129 14:45:04.113780 4787 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29494965-2kfpp"
Jan 29 14:45:04 crc kubenswrapper[4787]: I0129 14:45:04.458211 4787 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr"]
Jan 29 14:45:04 crc kubenswrapper[4787]: I0129 14:45:04.466831 4787 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29494920-q8kwr"]
Jan 29 14:45:05 crc kubenswrapper[4787]: I0129 14:45:05.998872 4787 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc0b8909-40fc-44a7-8323-7cca90901efa" path="/var/lib/kubelet/pods/cc0b8909-40fc-44a7-8323-7cca90901efa/volumes"
Jan 29 14:45:25 crc kubenswrapper[4787]: I0129 14:45:25.393373 4787 scope.go:117] "RemoveContainer" containerID="4089083518a7fd0f0fcfe4ff92eecaeade4b631506203f47362bb4d129b88db2"
Jan 29 14:45:58 crc kubenswrapper[4787]: I0129 14:45:58.884925 4787 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lq7m8"]
Jan 29 14:45:58 crc kubenswrapper[4787]: E0129 14:45:58.886570 4787 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbe5d689-12a9-43d7-b930-2e7373be36f5" containerName="collect-profiles"
Jan 29 14:45:58 crc kubenswrapper[4787]: I0129 14:45:58.886605 4787 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbe5d689-12a9-43d7-b930-2e7373be36f5" containerName="collect-profiles"
Jan 29 14:45:58 crc kubenswrapper[4787]: I0129 14:45:58.886966 4787 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbe5d689-12a9-43d7-b930-2e7373be36f5" containerName="collect-profiles"
Jan 29 14:45:58 crc kubenswrapper[4787]: I0129 14:45:58.889110 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:58 crc kubenswrapper[4787]: I0129 14:45:58.904831 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lq7m8"]
Jan 29 14:45:58 crc kubenswrapper[4787]: I0129 14:45:58.990872 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10e1feaa-f12e-4fe8-b53f-422920705829-utilities\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:58 crc kubenswrapper[4787]: I0129 14:45:58.990922 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtj8b\" (UniqueName: \"kubernetes.io/projected/10e1feaa-f12e-4fe8-b53f-422920705829-kube-api-access-jtj8b\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:58 crc kubenswrapper[4787]: I0129 14:45:58.990949 4787 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10e1feaa-f12e-4fe8-b53f-422920705829-catalog-content\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:59 crc kubenswrapper[4787]: I0129 14:45:59.092284 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10e1feaa-f12e-4fe8-b53f-422920705829-utilities\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:59 crc kubenswrapper[4787]: I0129 14:45:59.092364 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtj8b\" (UniqueName: \"kubernetes.io/projected/10e1feaa-f12e-4fe8-b53f-422920705829-kube-api-access-jtj8b\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:59 crc kubenswrapper[4787]: I0129 14:45:59.092396 4787 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10e1feaa-f12e-4fe8-b53f-422920705829-catalog-content\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:59 crc kubenswrapper[4787]: I0129 14:45:59.093423 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10e1feaa-f12e-4fe8-b53f-422920705829-utilities\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:59 crc kubenswrapper[4787]: I0129 14:45:59.093570 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10e1feaa-f12e-4fe8-b53f-422920705829-catalog-content\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:59 crc kubenswrapper[4787]: I0129 14:45:59.111937 4787 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtj8b\" (UniqueName: \"kubernetes.io/projected/10e1feaa-f12e-4fe8-b53f-422920705829-kube-api-access-jtj8b\") pod \"redhat-marketplace-lq7m8\" (UID: \"10e1feaa-f12e-4fe8-b53f-422920705829\") " pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:59 crc kubenswrapper[4787]: I0129 14:45:59.225138 4787 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lq7m8"
Jan 29 14:45:59 crc kubenswrapper[4787]: I0129 14:45:59.703210 4787 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lq7m8"]
Jan 29 14:46:00 crc kubenswrapper[4787]: I0129 14:46:00.581272 4787 generic.go:334] "Generic (PLEG): container finished" podID="10e1feaa-f12e-4fe8-b53f-422920705829" containerID="1fed886057809715a7c725be9bc3746b5bb0bc409d8d341a454613975645fa0a" exitCode=0
Jan 29 14:46:00 crc kubenswrapper[4787]: I0129 14:46:00.581364 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lq7m8" event={"ID":"10e1feaa-f12e-4fe8-b53f-422920705829","Type":"ContainerDied","Data":"1fed886057809715a7c725be9bc3746b5bb0bc409d8d341a454613975645fa0a"}
Jan 29 14:46:00 crc kubenswrapper[4787]: I0129 14:46:00.583143 4787 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lq7m8" event={"ID":"10e1feaa-f12e-4fe8-b53f-422920705829","Type":"ContainerStarted","Data":"2cacfd3c195b113a0722e136cb1034846f7a01a72a2408f11a6a734e4898bb23"}
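Nearly every transition in this log surfaces as a pair of records: generic.go's "Generic (PLEG): container finished" followed by a SyncLoop "(PLEG): event for pod" ContainerDied or ContainerStarted event. The pod lifecycle event generator produces these by periodically relisting containers through the runtime and diffing the result against its cached view. A toy relist diff; types are illustrative, and map iteration makes the output order arbitrary:

package main

import "fmt"

// Toy relist: diff the previous container states against the current ones
// and emit the events the PLEG records above correspond to.
type state string

const (
	running state = "running"
	exited  state = "exited"
)

func relist(old, cur map[string]state) {
	for id, s := range cur {
		prev, seen := old[id]
		switch {
		case !seen && s == running:
			fmt.Printf("event ContainerStarted %s\n", id)
		case seen && prev == running && s == exited:
			fmt.Printf("container finished, event ContainerDied %s\n", id)
		}
	}
}

func main() {
	old := map[string]state{"1fed8860": running}
	cur := map[string]state{"1fed8860": exited, "2cacfd3c": running}
	relist(old, cur) // ContainerDied 1fed8860, ContainerStarted 2cacfd3c
}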